diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..dc1e66f31c62fc32b88c636cb1dac76abfd7bc78 --- /dev/null +++ b/app.py @@ -0,0 +1,152 @@ +import argparse +import os +import random + +import numpy as np +import torch +import gradio as gr + +from minigpt4.common.config import Config +from minigpt4.common.dist_utils import get_rank +from minigpt4.common.registry import registry +from minigpt4.conversation.conversation import Chat, CONV_VISION + +# imports modules for registration +from minigpt4.datasets.builders import * +from minigpt4.models import * +from minigpt4.processors import * +from minigpt4.runners import * +from minigpt4.tasks import * + +def parse_args(): + parser = argparse.ArgumentParser(description="Demo") + parser.add_argument("--cfg-path", type=str, default='eval_configs/tinygptv_stage1_2_3_eval.yaml', help="path to configuration file.") + parser.add_argument( + "--options", + nargs="+", + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file (deprecate), " + "change to --cfg-options instead.", + ) + args = parser.parse_args() + return args + + +def setup_seeds(config): + seed = config.run_cfg.seed + get_rank() + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + +# ======================================== +# Model Initialization +# ======================================== + +SHARED_UI_WARNING = f'''### [NOTE] It is possible that you are waiting in a lengthy queue. + +You can duplicate and use it with a paid private GPU. + +Duplicate Space + +Alternatively, you can also use the demo on our [project page](https://minigpt-4.github.io). +''' + +print('Initializing Chat') +cfg = Config(parse_args()) + +model_config = cfg.model_cfg +model_cls = registry.get_model_class(model_config.arch) +model = model_cls.from_config(model_config).to('cpu') + +vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train +vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) +chat = Chat(model, vis_processor,device='cpu') +print('Initialization Finished') + +# ======================================== +# Gradio Setting +# ======================================== + +def gradio_reset(chat_state, img_list): + if chat_state is not None: + chat_state.messages = [] + if img_list is not None: + img_list = [] + return None, gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your image first', interactive=False), gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list + +def upload_img(gr_img, text_input, chat_state): + if gr_img is None: + return None, None, gr.update(interactive=True), chat_state, None + chat_state = CONV_VISION.copy() + img_list = [] + llm_message = chat.upload_img(gr_img, chat_state, img_list) + + return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list + +def gradio_ask(user_message, chatbot, chat_state): + if len(user_message) == 0: + return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state + chat.ask(user_message, chat_state) + chatbot = chatbot + [[user_message, None]] + return '', chatbot, chat_state + + +def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature): + llm_message = chat.answer(conv=chat_state, img_list=img_list, max_new_tokens=300, num_beams=1, 
temperature=temperature, max_length=2000)[0] + chatbot[-1][1] = llm_message + return chatbot, chat_state, img_list + +title = """<h1 align="center">Demo of TinyGPT-V</h1>""" +description = """<h3>This is the demo of TinyGPT-V. Upload your images and start chatting!</h3>""" +article = """
+""" + +#TODO show examples below + +with gr.Blocks() as demo: + gr.Markdown(title) + + gr.Markdown(description) + gr.Markdown(article) + + with gr.Row(): + with gr.Column(scale=0.5): + image = gr.Image(type="pil") + upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary") + clear = gr.Button("Restart") + + num_beams = gr.Slider( + minimum=1, + maximum=5, + value=1, + step=1, + interactive=True, + label="beam search numbers)", + ) + + temperature = gr.Slider( + minimum=0.1, + maximum=2.0, + value=1.0, + step=0.1, + interactive=True, + label="Temperature", + ) + + + with gr.Column(): + chat_state = gr.State() + img_list = gr.State() + chatbot = gr.Chatbot(label='TinyGPT-V') + text_input = gr.Textbox(label='User', placeholder='Please upload your image first', interactive=False) + + upload_button.click(upload_img, [image, text_input, chat_state], [image, text_input, upload_button, chat_state, img_list]) + + text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then( + gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list] + ) + clear.click(gradio_reset, [chat_state, img_list], [chatbot, image, text_input, upload_button, chat_state, img_list], queue=False) + +demo.launch(enable_queue=True) \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..507285c2399154fefdfcc68231da7f6257f21280 --- /dev/null +++ b/environment.yml @@ -0,0 +1,183 @@ +name: tinygptv-demo +channels: + - defaults + +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - ca-certificates=2023.08.22=h06a4308_0 + - cudatoolkit=11.8.0=h6a678d5_0 + - ld_impl_linux-64=2.38=h1181459_1 + - libffi=3.4.4=h6a678d5_0 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libstdcxx-ng=11.2.0=h1234567_1 + - ncurses=6.4=h6a678d5_0 + - openssl=3.0.12=h7f8727e_0 + - pip=23.3.1=py39h06a4308_0 + - python=3.9.18=h955ad1f_0 + - readline=8.2=h5eee18b_0 + - setuptools=68.2.2=py39h06a4308_0 + - sqlite=3.41.2=h5eee18b_0 + - tk=8.6.12=h1ccaba5_0 + - wheel=0.41.2=py39h06a4308_0 + - xz=5.4.5=h5eee18b_0 + - zlib=1.2.13=h5eee18b_0 + - pip: + - accelerate==0.20.3 + - aiofiles==23.2.1 + - aiohttp==3.9.1 + - aiosignal==1.3.1 + - altair==5.2.0 + - annotated-types==0.6.0 + - antlr4-python3-runtime==4.9.3 + - anyio==3.7.1 + - appdirs==1.4.4 + - asttokens==2.4.1 + - async-timeout==4.0.3 + - attrs==23.1.0 + - bitsandbytes==0.37.0 + - braceexpand==0.1.7 + - certifi==2023.11.17 + - charset-normalizer==3.3.2 + - click==8.1.7 + - cmake==3.28.1 + - comm==0.2.0 + - contourpy==1.2.0 + - cycler==0.12.1 + - datasets==2.15.0 + - debugpy==1.8.0 + - decorator==5.1.1 + - decord==0.6.0 + - dill==0.3.7 + - docker-pycreds==0.4.0 + - einops==0.7.0 + - exceptiongroup==1.2.0 + - executing==2.0.1 + - fastapi==0.105.0 + - ffmpy==0.3.1 + - filelock==3.13.1 + - fonttools==4.46.0 + - frozenlist==1.4.1 + - fsspec==2023.10.0 + - gitdb==4.0.11 + - gitpython==3.1.40 + - gradio==3.47.1 + - gradio-client==0.6.0 + - h11==0.14.0 + - httpcore==1.0.2 + - httpx==0.25.2 + - huggingface-hub==0.19.4 + - idna==3.6 + - imageio==2.33.1 + - importlib-metadata==7.0.0 + - importlib-resources==6.1.1 + - iopath==0.1.10 + - ipykernel==6.27.1 + - ipython==8.18.1 + - jedi==0.19.1 + - jinja2==3.1.2 + - joblib==1.3.2 + - jsonschema==4.20.0 + - jsonschema-specifications==2023.11.2 + - jupyter-client==8.6.0 + - jupyter-core==5.5.1 + - kiwisolver==1.4.5 + - lazy-loader==0.3 
+ - lit==17.0.6 + - markupsafe==2.1.3 + - matplotlib==3.7.0 + - matplotlib-inline==0.1.6 + - mpmath==1.3.0 + - multidict==6.0.4 + - multiprocess==0.70.15 + - nest-asyncio==1.5.8 + - networkx==3.2.1 + - nltk==3.8.1 + - numpy==1.26.2 + - nvidia-cublas-cu11==11.10.3.66 + - nvidia-cuda-cupti-cu11==11.7.101 + - nvidia-cuda-nvrtc-cu11==11.7.99 + - nvidia-cuda-runtime-cu11==11.7.99 + - nvidia-cudnn-cu11==8.5.0.96 + - nvidia-cufft-cu11==10.9.0.58 + - nvidia-curand-cu11==10.2.10.91 + - nvidia-cusolver-cu11==11.4.0.1 + - nvidia-cusparse-cu11==11.7.4.91 + - nvidia-nccl-cu11==2.14.3 + - nvidia-nvtx-cu11==11.7.91 + - omegaconf==2.3.0 + - opencv-python==4.7.0.72 + - orjson==3.9.10 + - packaging==23.2 + - pandas==2.1.4 + - parso==0.8.3 + - peft==0.2.0 + - pexpect==4.9.0 + - pillow==10.1.0 + - platformdirs==4.1.0 + - portalocker==2.8.2 + - progressbar2==4.3.0 + - prompt-toolkit==3.0.43 + - protobuf==4.25.1 + - psutil==5.9.4 + - ptyprocess==0.7.0 + - pure-eval==0.2.2 + - pyarrow==14.0.2 + - pyarrow-hotfix==0.6 + - pydantic==2.5.2 + - pydantic-core==2.14.5 + - pydub==0.25.1 + - pygments==2.17.2 + - pyparsing==3.1.1 + - python-dateutil==2.8.2 + - python-multipart==0.0.6 + - python-utils==3.8.1 + - pytz==2023.3.post1 + - pyyaml==6.0 + - pyzmq==25.1.2 + - referencing==0.32.0 + - regex==2022.10.31 + - requests==2.31.0 + - rpds-py==0.15.2 + - safetensors==0.4.1 + - scikit-image==0.22.0 + - scikit-learn==1.3.2 + - scipy==1.11.4 + - semantic-version==2.10.0 + - sentence-transformers==2.2.2 + - sentencepiece==0.1.99 + - sentry-sdk==1.39.1 + - setproctitle==1.3.3 + - six==1.16.0 + - smmap==5.0.1 + - sniffio==1.3.0 + - stack-data==0.6.3 + - starlette==0.27.0 + - sympy==1.12 + - threadpoolctl==3.2.0 + - tifffile==2023.12.9 + - timm==0.6.13 + - tokenizers==0.15.0 + - toolz==0.12.0 + - torch==2.0.0 + - torchaudio==2.0.1 + - torchvision==0.15.1 + - tornado==6.4 + - tqdm==4.64.1 + - traitlets==5.14.0 + - transformers==4.36.2 + - triton==2.0.0 + - typing-extensions==4.9.0 + - tzdata==2023.3 + - urllib3==2.1.0 + - uvicorn==0.24.0.post1 + - visual-genome==1.1.1 + - wandb==0.16.1 + - wcwidth==0.2.12 + - webdataset==0.2.48 + - websockets==11.0.3 + - xxhash==3.4.1 + - yarl==1.9.4 + - zipp==3.17.0 + diff --git a/eval_configs/benchmark_evaluation.yaml b/eval_configs/benchmark_evaluation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33017fbce3330d9b8d47ed6ad4d3b9e65bbfd198 --- /dev/null +++ b/eval_configs/benchmark_evaluation.yaml @@ -0,0 +1,60 @@ +model: + arch: minigpt_v2 + model_type: pretrain + max_txt_len: 500 + end_sym: "###" + low_resource: False + prompt_template: 'Instruct: {} /n Output: ' + llama_model: "" + ckpt: "" + lora_r: 64 + lora_alpha: 16 + + + +datasets: + cc_sbu_align: + vis_processor: + train: + name: "blip2_image_eval" + image_size: 448 + text_processor: + train: + name: "blip_caption" + +evaluation_datasets: + gqa: + eval_file_path: /root/autodl-tmp/evaluation/gqa/annotations/testdev_balanced_questions.json + img_path: /root/autodl-tmp/evaluation/gqa/images + max_new_tokens: 20 + batch_size: 10 + vizwiz: + eval_file_path: /root/autodl-tmp/evaluation/vizwiz/val.json + img_path: /root/autodl-tmp/evaluation/vizwiz/val + max_new_tokens: 20 + batch_size: 10 + iconvqa: + eval_file_path: /root/autodl-tmp/evaluation/iconqa/iconqa_data/problems.json + img_path: /root/autodl-tmp/evaluation/iconqa/iconqa_data/iconqa + max_new_tokens: 20 + batch_size: 1 + vsr: + eval_file_path: /root/autodl-tmp/evaluation/vsr/dev.jsonl + img_path: /root/autodl-tmp/coco2017/train + max_new_tokens: 20 + 
batch_size: 10 + hm: + eval_file_path: /root/autodl-tmp/evaluation/Hateful_Memes/data/dev.jsonl + img_path: /root/autodl-tmp/evaluation/Hateful_Memes/data + max_new_tokens: 20 + batch_size: 10 + +run: + task: image_text_pretrain + name: minigptv2_evaluation + save_path: /root/MiniGPT-4/save_evalution + + + + + diff --git a/eval_configs/tinygptv_stage1_2_3_eval.yaml b/eval_configs/tinygptv_stage1_2_3_eval.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa58de008a31013e7f26be7f3b26aa30aeb697c3 --- /dev/null +++ b/eval_configs/tinygptv_stage1_2_3_eval.yaml @@ -0,0 +1,24 @@ +model: + arch: minigpt4 + model_type: pretrain_vicuna0 + max_txt_len: 160 + bos_token_id: "###" + low_resource: False + prompt_template: '###Human: {} ###Assistant: ' + ckpt: 'TinyGPT-V_for_Stage3.pth' + lora_r: 64 + lora_alpha: 16 + + +datasets: + cc_sbu_align: + vis_processor: + train: + name: "blip2_image_eval" + image_size: 224 + text_processor: + train: + name: "blip_caption" + +run: + task: image_text_pretrain diff --git a/eval_configs/tinygptv_stage4_eval.yaml b/eval_configs/tinygptv_stage4_eval.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53caffbd3297fb28679bcc3b69a3cc9df0a4d4cc --- /dev/null +++ b/eval_configs/tinygptv_stage4_eval.yaml @@ -0,0 +1,24 @@ +model: + arch: minigpt_v2 + model_type: pretrain + max_txt_len: 500 + bos_token_id: "###" + low_resource: False + prompt_template: 'Instruct: {} /n Output: ' + ckpt: "/home/li0007xu/LLM/TinyGPT-V/TinyGPT-V_for_Stage4.pth" + lora_r: 64 + lora_alpha: 16 + + +datasets: + cc_sbu_align: + vis_processor: + train: + name: "blip2_image_eval" + image_size: 448 + text_processor: + train: + name: "blip_caption" + +run: + task: image_text_pretrain diff --git a/examples/TinyGPT-V-ST.png b/examples/TinyGPT-V-ST.png new file mode 100644 index 0000000000000000000000000000000000000000..0a236c7220c2e8ed270a247fd7d21b3d00f36601 Binary files /dev/null and b/examples/TinyGPT-V-ST.png differ diff --git a/examples/Training_S.png b/examples/Training_S.png new file mode 100644 index 0000000000000000000000000000000000000000..6fa2a8b3fbf7d632d7117c8207e71d94f64bb862 Binary files /dev/null and b/examples/Training_S.png differ diff --git a/examples/result.png b/examples/result.png new file mode 100644 index 0000000000000000000000000000000000000000..df1c7c378e5d85177e62b77ef063e25faee70fe7 Binary files /dev/null and b/examples/result.png differ diff --git a/minigpt4/__init__.py b/minigpt4/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb31f42f9107a0b748b878deb1c5768019d62b32 --- /dev/null +++ b/minigpt4/__init__.py @@ -0,0 +1,31 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import os +import sys + +from omegaconf import OmegaConf + +from minigpt4.common.registry import registry + +from minigpt4.datasets.builders import * +from minigpt4.models import * +from minigpt4.processors import * +from minigpt4.tasks import * + + +root_dir = os.path.dirname(os.path.abspath(__file__)) +default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) + +registry.register_path("library_root", root_dir) +repo_root = os.path.join(root_dir, "..") +registry.register_path("repo_root", repo_root) +cache_root = os.path.join(repo_root, default_cfg.env.cache_root) +registry.register_path("cache_root", cache_root) + +registry.register("MAX_INT", sys.maxsize) +registry.register("SPLIT_NAMES", ["train", "val", "test"]) diff --git a/minigpt4/__pycache__/__init__.cpython-39.pyc b/minigpt4/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6669b02f8ebc93822560190b6a0f901a914eaf6f Binary files /dev/null and b/minigpt4/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/common/__init__.py b/minigpt4/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt4/common/__pycache__/__init__.cpython-39.pyc b/minigpt4/common/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04dbb900d4c234e69342c379e7eb1394d80df2ba Binary files /dev/null and b/minigpt4/common/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/common/__pycache__/config.cpython-39.pyc b/minigpt4/common/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e3af019140f31248df20c33f353f494eb73cde9 Binary files /dev/null and b/minigpt4/common/__pycache__/config.cpython-39.pyc differ diff --git a/minigpt4/common/__pycache__/dist_utils.cpython-39.pyc b/minigpt4/common/__pycache__/dist_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c359c86018fc4e25895489b88c1be9760f21de7 Binary files /dev/null and b/minigpt4/common/__pycache__/dist_utils.cpython-39.pyc differ diff --git a/minigpt4/common/__pycache__/logger.cpython-39.pyc b/minigpt4/common/__pycache__/logger.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77f63d41c3fd014adb04b55e483182f41912ff4d Binary files /dev/null and b/minigpt4/common/__pycache__/logger.cpython-39.pyc differ diff --git a/minigpt4/common/__pycache__/registry.cpython-39.pyc b/minigpt4/common/__pycache__/registry.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c711cd6278a14ca3d4a4c6424f7a1d1f8a90ed6 Binary files /dev/null and b/minigpt4/common/__pycache__/registry.cpython-39.pyc differ diff --git a/minigpt4/common/__pycache__/utils.cpython-39.pyc b/minigpt4/common/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..333ddef8d4458da7dd20300c1957b62b3c939134 Binary files /dev/null and b/minigpt4/common/__pycache__/utils.cpython-39.pyc differ diff --git a/minigpt4/common/config.py b/minigpt4/common/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a1d3278bfe9caf59bddecd102d42a79ed8b71e55 --- /dev/null +++ b/minigpt4/common/config.py @@ -0,0 +1,496 @@ +""" + Copyright (c) 2022, 
salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import logging +import json +from typing import Dict + +from omegaconf import OmegaConf +from minigpt4.common.registry import registry + + +class Config: + def __init__(self, args): + self.config = {} + + self.args = args + + # Register the config and configuration for setup + registry.register("configuration", self) + + user_config = self._build_opt_list(self.args.options) + + config = OmegaConf.load(self.args.cfg_path) + + runner_config = self.build_runner_config(config) + model_config = self.build_model_config(config, **user_config) + dataset_config = self.build_dataset_config(config) + evaluation_dataset_config = self.build_evaluation_dataset_config(config) + + # Validate the user-provided runner configuration + # model and dataset configuration are supposed to be validated by the respective classes + # [TODO] validate the model/dataset configuration + # self._validate_runner_config(runner_config) + + # Override the default configuration with user options. + self.config = OmegaConf.merge( + runner_config, model_config, dataset_config,evaluation_dataset_config, user_config + ) + + def _validate_runner_config(self, runner_config): + """ + This method validates the configuration, such that + 1) all the user specified options are valid; + 2) no type mismatches between the user specified options and the config. + """ + runner_config_validator = create_runner_config_validator() + runner_config_validator.validate(runner_config) + + def _build_opt_list(self, opts): + opts_dot_list = self._convert_to_dot_list(opts) + return OmegaConf.from_dotlist(opts_dot_list) + + @staticmethod + def build_model_config(config, **kwargs): + model = config.get("model", None) + assert model is not None, "Missing model configuration file." + + model_cls = registry.get_model_class(model.arch) + assert model_cls is not None, f"Model '{model.arch}' has not been registered." + + model_type = kwargs.get("model.model_type", None) + if not model_type: + model_type = model.get("model_type", None) + # else use the model type selected by user. + + assert model_type is not None, "Missing model_type." + + model_config_path = model_cls.default_config_path(model_type=model_type) + + model_config = OmegaConf.create() + # hierarchy override, customized config > default config + model_config = OmegaConf.merge( + model_config, + OmegaConf.load(model_config_path), + {"model": config["model"]}, + ) + + return model_config + + @staticmethod + def build_runner_config(config): + return {"run": config.run} + + @staticmethod + def build_dataset_config(config): + datasets = config.get("datasets", None) + if datasets is None: + raise KeyError( + "Expecting 'datasets' as the root key for dataset configuration." 
+ ) + + dataset_config = OmegaConf.create() + + for dataset_name in datasets: + builder_cls = registry.get_builder_class(dataset_name) + + dataset_config_type = datasets[dataset_name].get("type", "default") + dataset_config_path = builder_cls.default_config_path( + type=dataset_config_type + ) + + # hierarchy override, customized config > default config + dataset_config = OmegaConf.merge( + dataset_config, + OmegaConf.load(dataset_config_path), + {"datasets": {dataset_name: config["datasets"][dataset_name]}}, + ) + + return dataset_config + + + @staticmethod + def build_evaluation_dataset_config(config): + datasets = config.get("evaluation_datasets", None) + # if datasets is None: + # raise KeyError( + # "Expecting 'datasets' as the root key for dataset configuration." + # ) + + dataset_config = OmegaConf.create() + + if datasets is not None: + for dataset_name in datasets: + builder_cls = registry.get_builder_class(dataset_name) + + # hierarchy override, customized config > default config + dataset_config = OmegaConf.merge( + dataset_config, + {"evaluation_datasets": {dataset_name: config["evaluation_datasets"][dataset_name]}}, + ) + + return dataset_config + + def _convert_to_dot_list(self, opts): + if opts is None: + opts = [] + + if len(opts) == 0: + return opts + + has_equal = opts[0].find("=") != -1 + + if has_equal: + return opts + + return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])] + + def get_config(self): + return self.config + + @property + def run_cfg(self): + return self.config.run + + @property + def datasets_cfg(self): + return self.config.datasets + + @property + def evaluation_datasets_cfg(self): + return self.config.evaluation_datasets + + @property + def model_cfg(self): + return self.config.model + + def pretty_print(self): + logging.info("\n===== Running Parameters =====") + logging.info(self._convert_node_to_json(self.config.run)) + + logging.info("\n====== Dataset Attributes ======") + datasets = self.config.datasets + + for dataset in datasets: + if dataset in self.config.datasets: + logging.info(f"\n======== {dataset} =======") + dataset_config = self.config.datasets[dataset] + logging.info(self._convert_node_to_json(dataset_config)) + else: + logging.warning(f"No dataset named '{dataset}' in config. Skipping") + + logging.info(f"\n====== Model Attributes ======") + logging.info(self._convert_node_to_json(self.config.model)) + + def _convert_node_to_json(self, node): + container = OmegaConf.to_container(node, resolve=True) + return json.dumps(container, indent=4, sort_keys=True) + + def to_dict(self): + return OmegaConf.to_container(self.config) + + +def node_to_dict(node): + return OmegaConf.to_container(node) + + +class ConfigValidator: + """ + This is a preliminary implementation to centralize and validate the configuration. + May be altered in the future. + + A helper class to validate configurations from yaml file. + + This serves the following purposes: + 1. Ensure all the options in the yaml are defined, raise error if not. + 2. when type mismatches are found, the validator will raise an error. + 3. a central place to store and display helpful messages for supported configurations. 
+ + """ + + class _Argument: + def __init__(self, name, choices=None, type=None, help=None): + self.name = name + self.val = None + self.choices = choices + self.type = type + self.help = help + + def __str__(self): + s = f"{self.name}={self.val}" + if self.type is not None: + s += f", ({self.type})" + if self.choices is not None: + s += f", choices: {self.choices}" + if self.help is not None: + s += f", ({self.help})" + return s + + def __init__(self, description): + self.description = description + + self.arguments = dict() + + self.parsed_args = None + + def __getitem__(self, key): + assert self.parsed_args is not None, "No arguments parsed yet." + + return self.parsed_args[key] + + def __str__(self) -> str: + return self.format_help() + + def add_argument(self, *args, **kwargs): + """ + Assume the first argument is the name of the argument. + """ + self.arguments[args[0]] = self._Argument(*args, **kwargs) + + def validate(self, config=None): + """ + Convert yaml config (dict-like) to list, required by argparse. + """ + for k, v in config.items(): + assert ( + k in self.arguments + ), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}.""" + + if self.arguments[k].type is not None: + try: + self.arguments[k].val = self.arguments[k].type(v) + except ValueError: + raise ValueError(f"{k} is not a valid {self.arguments[k].type}.") + + if self.arguments[k].choices is not None: + assert ( + v in self.arguments[k].choices + ), f"""{k} must be one of {self.arguments[k].choices}.""" + + return config + + def format_arguments(self): + return str([f"{k}" for k in sorted(self.arguments.keys())]) + + def format_help(self): + # description + key-value pair string for each argument + help_msg = str(self.description) + return help_msg + ", available arguments: " + self.format_arguments() + + def print_help(self): + # display help message + print(self.format_help()) + + +def create_runner_config_validator(): + validator = ConfigValidator(description="Runner configurations") + + validator.add_argument( + "runner", + type=str, + choices=["runner_base", "runner_iter"], + help="""Runner to use. The "runner_base" uses epoch-based training while iter-based + runner runs based on iters. Default: runner_base""", + ) + # add argumetns for training dataset ratios + validator.add_argument( + "train_dataset_ratios", + type=Dict[str, float], + help="""Ratios of training dataset. This is used in iteration-based runner. + Do not support for epoch-based runner because how to define an epoch becomes tricky. + Default: None""", + ) + validator.add_argument( + "max_iters", + type=float, + help="Maximum number of iterations to run.", + ) + validator.add_argument( + "max_epoch", + type=int, + help="Maximum number of epochs to run.", + ) + # add arguments for iters_per_inner_epoch + validator.add_argument( + "iters_per_inner_epoch", + type=float, + help="Number of iterations per inner epoch. This is required when runner is runner_iter.", + ) + lr_scheds_choices = registry.list_lr_schedulers() + validator.add_argument( + "lr_sched", + type=str, + choices=lr_scheds_choices, + help="Learning rate scheduler to use, from {}".format(lr_scheds_choices), + ) + task_choices = registry.list_tasks() + validator.add_argument( + "task", + type=str, + choices=task_choices, + help="Task to use, from {}".format(task_choices), + ) + # add arguments for init_lr + validator.add_argument( + "init_lr", + type=float, + help="Initial learning rate. 
This will be the learning rate after warmup and before decay.", + ) + # add arguments for min_lr + validator.add_argument( + "min_lr", + type=float, + help="Minimum learning rate (after decay).", + ) + # add arguments for warmup_lr + validator.add_argument( + "warmup_lr", + type=float, + help="Starting learning rate for warmup.", + ) + # add arguments for learning rate decay rate + validator.add_argument( + "lr_decay_rate", + type=float, + help="Learning rate decay rate. Required if using a decaying learning rate scheduler.", + ) + # add arguments for weight decay + validator.add_argument( + "weight_decay", + type=float, + help="Weight decay rate.", + ) + # add arguments for training batch size + validator.add_argument( + "batch_size_train", + type=int, + help="Training batch size.", + ) + # add arguments for evaluation batch size + validator.add_argument( + "batch_size_eval", + type=int, + help="Evaluation batch size, including validation and testing.", + ) + # add arguments for number of workers for data loading + validator.add_argument( + "num_workers", + help="Number of workers for data loading.", + ) + # add arguments for warm up steps + validator.add_argument( + "warmup_steps", + type=int, + help="Number of warmup steps. Required if a warmup schedule is used.", + ) + # add arguments for random seed + validator.add_argument( + "seed", + type=int, + help="Random seed.", + ) + # add arguments for output directory + validator.add_argument( + "output_dir", + type=str, + help="Output directory to save checkpoints and logs.", + ) + # add arguments for whether only use evaluation + validator.add_argument( + "evaluate", + help="Whether to only evaluate the model. If true, training will not be performed.", + ) + # add arguments for splits used for training, e.g. ["train", "val"] + validator.add_argument( + "train_splits", + type=list, + help="Splits to use for training.", + ) + # add arguments for splits used for validation, e.g. ["val"] + validator.add_argument( + "valid_splits", + type=list, + help="Splits to use for validation. If not provided, will skip the validation.", + ) + # add arguments for splits used for testing, e.g. ["test"] + validator.add_argument( + "test_splits", + type=list, + help="Splits to use for testing. If not provided, will skip the testing.", + ) + # add arguments for accumulating gradient for iterations + validator.add_argument( + "accum_grad_iters", + type=int, + help="Number of iterations to accumulate gradient for.", + ) + + # ====== distributed training ====== + validator.add_argument( + "device", + type=str, + choices=["cpu", "cuda"], + help="Device to use. 
Support 'cuda' or 'cpu' as for now.", + ) + validator.add_argument( + "world_size", + type=int, + help="Number of processes participating in the job.", + ) + validator.add_argument("dist_url", type=str) + validator.add_argument("distributed", type=bool) + # add arguments to opt using distributed sampler during evaluation or not + validator.add_argument( + "use_dist_eval_sampler", + type=bool, + help="Whether to use distributed sampler during evaluation or not.", + ) + + # ====== task specific ====== + # generation task specific arguments + # add arguments for maximal length of text output + validator.add_argument( + "max_len", + type=int, + help="Maximal length of text output.", + ) + # add arguments for minimal length of text output + validator.add_argument( + "min_len", + type=int, + help="Minimal length of text output.", + ) + # add arguments number of beams + validator.add_argument( + "num_beams", + type=int, + help="Number of beams used for beam search.", + ) + + # vqa task specific arguments + # add arguments for number of answer candidates + validator.add_argument( + "num_ans_candidates", + type=int, + help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""", + ) + # add arguments for inference method + validator.add_argument( + "inference_method", + type=str, + choices=["genearte", "rank"], + help="""Inference method to use for question answering. If rank, requires a answer list.""", + ) + + # ====== model specific ====== + validator.add_argument( + "k_test", + type=int, + help="Number of top k most similar samples from ITC/VTC selection to be tested.", + ) + + return validator diff --git a/minigpt4/common/dist_utils.py b/minigpt4/common/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a6fc1b904dccccbffbd96326b1506f8ff3ca19c1 --- /dev/null +++ b/minigpt4/common/dist_utils.py @@ -0,0 +1,140 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import datetime +import functools +import os + +import torch +import torch.distributed as dist +import timm.models.hub as timm_hub + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop("force", False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def init_distributed_mode(args): + if args.distributed is False: + print("Not using distributed mode") + return + elif "RANK" in os.environ and "WORLD_SIZE" in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ["WORLD_SIZE"]) + args.gpu = int(os.environ["LOCAL_RANK"]) + elif "SLURM_PROCID" in os.environ: + args.rank = int(os.environ["SLURM_PROCID"]) + args.gpu = args.rank % torch.cuda.device_count() + else: + print("Not using distributed mode") + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = "nccl" + print( + "| distributed init (rank {}, world {}): {}".format( + args.rank, args.world_size, args.dist_url + ), + flush=True, + ) + torch.distributed.init_process_group( + backend=args.dist_backend, + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, + timeout=datetime.timedelta( + days=365 + ), # allow auto-downloading and de-compressing + ) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +def get_dist_info(): + if torch.__version__ < "1.0": + initialized = dist._initialized + else: + initialized = dist.is_initialized() + if initialized: + rank = dist.get_rank() + world_size = dist.get_world_size() + else: # non-distributed training + rank = 0 + world_size = 1 + return rank, world_size + + +def main_process(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper + + +def download_cached_file(url, check_hash=True, progress=False): + """ + Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again. + If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded. 
+ """ + + def get_cached_file_path(): + # a hack to sync the file path across processes + parts = torch.hub.urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(timm_hub.get_cache_dir(), filename) + + return cached_file + + if is_main_process(): + timm_hub.download_cached_file(url, check_hash, progress) + + if is_dist_avail_and_initialized(): + dist.barrier() + + return get_cached_file_path() diff --git a/minigpt4/common/eval_utils.py b/minigpt4/common/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3087d2a820a4e2a0d16b9bbfeeaacb9c474653af --- /dev/null +++ b/minigpt4/common/eval_utils.py @@ -0,0 +1,76 @@ +import argparse +import numpy as np +from nltk.translate.bleu_score import sentence_bleu + +from minigpt4.common.registry import registry +from minigpt4.common.config import Config + +# imports modules for registration +from minigpt4.datasets.builders import * +from minigpt4.models import * +from minigpt4.processors import * +from minigpt4.runners import * +from minigpt4.tasks import * + + + +def eval_parser(): + parser = argparse.ArgumentParser(description="Demo") + parser.add_argument("--cfg-path", required=True, help="path to configuration file.") + parser.add_argument("--name", type=str, default='A2', help="evaluation name") + parser.add_argument("--ckpt", type=str, help="path to configuration file.") + parser.add_argument("--eval_opt", type=str, default='all', help="path to configuration file.") + parser.add_argument("--max_new_tokens", type=int, default=10, help="max number of generated tokens") + parser.add_argument("--batch_size", type=int, default=32) + parser.add_argument("--lora_r", type=int, default=64, help="lora rank of the model") + parser.add_argument("--lora_alpha", type=int, default=16, help="lora alpha") + parser.add_argument( + "--options", + nargs="+", + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file (deprecate), " + "change to --cfg-options instead.", + ) + return parser + + +def prepare_texts(texts, conv_temp): + convs = [conv_temp.copy() for _ in range(len(texts))] + [conv.append_message( + conv.roles[0], ' {}'.format(text)) for conv, text in zip(convs, texts)] + [conv.append_message(conv.roles[1], None) for conv in convs] + texts = [conv.get_prompt() for conv in convs] + return texts + + +def init_model(args): + print('Initialization Model') + cfg = Config(args) + # cfg.model_cfg.ckpt = args.ckpt + # cfg.model_cfg.lora_r = args.lora_r + # cfg.model_cfg.lora_alpha = args.lora_alpha + + model_config = cfg.model_cfg + model_cls = registry.get_model_class(model_config.arch) + model = model_cls.from_config(model_config).to('cuda:0') + +# import pudb; pudb.set_trace() + key = list(cfg.datasets_cfg.keys())[0] + vis_processor_cfg = cfg.datasets_cfg.get(key).vis_processor.train + vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) + print('Initialization Finished') + return model, vis_processor + +def computeIoU(bbox1, bbox2): + x1, y1, x2, y2 = bbox1 + x3, y3, x4, y4 = bbox2 + intersection_x1 = max(x1, x3) + intersection_y1 = max(y1, y3) + intersection_x2 = min(x2, x4) + intersection_y2 = min(y2, y4) + intersection_area = max(0, intersection_x2 - intersection_x1 + 1) * max(0, intersection_y2 - intersection_y1 + 1) + bbox1_area = (x2 - x1 + 1) * (y2 - y1 + 1) + bbox2_area = (x4 - x3 + 1) * (y4 - y3 + 1) + union_area = bbox1_area + bbox2_area - intersection_area + iou = 
intersection_area / union_area + return iou diff --git a/minigpt4/common/gradcam.py b/minigpt4/common/gradcam.py new file mode 100644 index 0000000000000000000000000000000000000000..d53a5254d4b319eaf2cbfbd081b0ca8e38c5c7a0 --- /dev/null +++ b/minigpt4/common/gradcam.py @@ -0,0 +1,24 @@ +import numpy as np +from matplotlib import pyplot as plt +from scipy.ndimage import filters +from skimage import transform as skimage_transform + + +def getAttMap(img, attMap, blur=True, overlap=True): + attMap -= attMap.min() + if attMap.max() > 0: + attMap /= attMap.max() + attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant") + if blur: + attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2])) + attMap -= attMap.min() + attMap /= attMap.max() + cmap = plt.get_cmap("jet") + attMapV = cmap(attMap) + attMapV = np.delete(attMapV, 3, 2) + if overlap: + attMap = ( + 1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img + + (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV + ) + return attMap diff --git a/minigpt4/common/logger.py b/minigpt4/common/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5a727213c6478606a154172830cdc43aae6f5a --- /dev/null +++ b/minigpt4/common/logger.py @@ -0,0 +1,195 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import datetime +import logging +import time +from collections import defaultdict, deque + +import torch +import torch.distributed as dist + +from minigpt4.common import dist_utils + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not dist_utils.is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value, + ) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError( + "'{}' object has no attribute '{}'".format(type(self).__name__, attr) + ) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append("{}: {}".format(name, str(meter))) + return self.delimiter.join(loss_str) + + def global_avg(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append("{}: {:.4f}".format(name, meter.global_avg)) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = "" + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt="{avg:.4f}") + data_time = SmoothedValue(fmt="{avg:.4f}") + space_fmt = ":" + str(len(str(len(iterable)))) + "d" + log_msg = [ + header, + "[{0" + space_fmt + "}/{1}]", + "eta: {eta}", + "{meters}", + "time: {time}", + "data: {data}", + ] + if torch.cuda.is_available(): + log_msg.append("max mem: {memory:.0f}") + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB, + ) + ) + else: + print( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + ) + ) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print( + "{} Total time: {} ({:.4f} s / it)".format( + header, total_time_str, total_time / len(iterable) + ) + ) + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def setup_logger(): + 
logging.basicConfig( + level=logging.INFO if dist_utils.is_main_process() else logging.WARN, + format="%(asctime)s [%(levelname)s] %(message)s", + handlers=[logging.StreamHandler()], + ) diff --git a/minigpt4/common/optims.py b/minigpt4/common/optims.py new file mode 100644 index 0000000000000000000000000000000000000000..58327f723d445633ce7d1b5c3cc799b041319a97 --- /dev/null +++ b/minigpt4/common/optims.py @@ -0,0 +1,119 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import math + +from minigpt4.common.registry import registry + + +@registry.register_lr_scheduler("linear_warmup_step_lr") +class LinearWarmupStepLRScheduler: + def __init__( + self, + optimizer, + max_epoch, + min_lr, + init_lr, + decay_rate=1, + warmup_start_lr=-1, + warmup_steps=0, + **kwargs + ): + self.optimizer = optimizer + + self.max_epoch = max_epoch + self.min_lr = min_lr + + self.decay_rate = decay_rate + + self.init_lr = init_lr + self.warmup_steps = warmup_steps + self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr + + def step(self, cur_epoch, cur_step): + if cur_epoch == 0: + warmup_lr_schedule( + step=cur_step, + optimizer=self.optimizer, + max_step=self.warmup_steps, + init_lr=self.warmup_start_lr, + max_lr=self.init_lr, + ) + else: + step_lr_schedule( + epoch=cur_epoch, + optimizer=self.optimizer, + init_lr=self.init_lr, + min_lr=self.min_lr, + decay_rate=self.decay_rate, + ) + + +@registry.register_lr_scheduler("linear_warmup_cosine_lr") +class LinearWarmupCosineLRScheduler: + def __init__( + self, + optimizer, + max_epoch, + iters_per_epoch, + min_lr, + init_lr, + warmup_steps=0, + warmup_start_lr=-1, + **kwargs + ): + self.optimizer = optimizer + + self.max_epoch = max_epoch + self.iters_per_epoch = iters_per_epoch + self.min_lr = min_lr + + self.init_lr = init_lr + self.warmup_steps = warmup_steps + self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr + + def step(self, cur_epoch, cur_step): + total_cur_step = cur_epoch * self.iters_per_epoch + cur_step + if total_cur_step < self.warmup_steps: + warmup_lr_schedule( + step=cur_step, + optimizer=self.optimizer, + max_step=self.warmup_steps, + init_lr=self.warmup_start_lr, + max_lr=self.init_lr, + ) + else: + cosine_lr_schedule( + epoch=total_cur_step, + optimizer=self.optimizer, + max_epoch=self.max_epoch * self.iters_per_epoch, + init_lr=self.init_lr, + min_lr=self.min_lr, + ) + + +def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr): + """Decay the learning rate""" + lr = (init_lr - min_lr) * 0.5 * ( + 1.0 + math.cos(math.pi * epoch / max_epoch) + ) + min_lr + for param_group in optimizer.param_groups: + param_group["lr"] = lr + + +def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr): + """Warmup the learning rate""" + lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1)) + for param_group in optimizer.param_groups: + param_group["lr"] = lr + + +def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate): + """Decay the learning rate""" + lr = max(min_lr, init_lr * (decay_rate**epoch)) + for param_group in optimizer.param_groups: + param_group["lr"] = lr diff --git a/minigpt4/common/registry.py b/minigpt4/common/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..679467a7411eda19ed956b810c21234322f06779 --- /dev/null +++ 
b/minigpt4/common/registry.py @@ -0,0 +1,329 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + + +class Registry: + mapping = { + "builder_name_mapping": {}, + "task_name_mapping": {}, + "processor_name_mapping": {}, + "model_name_mapping": {}, + "lr_scheduler_name_mapping": {}, + "runner_name_mapping": {}, + "state": {}, + "paths": {}, + } + + @classmethod + def register_builder(cls, name): + r"""Register a dataset builder to registry with key 'name' + + Args: + name: Key with which the builder will be registered. + + Usage: + + from minigpt4.common.registry import registry + from minigpt4.datasets.base_dataset_builder import BaseDatasetBuilder + """ + + def wrap(builder_cls): + from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder + + assert issubclass( + builder_cls, BaseDatasetBuilder + ), "All builders must inherit BaseDatasetBuilder class, found {}".format( + builder_cls + ) + if name in cls.mapping["builder_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["builder_name_mapping"][name] + ) + ) + cls.mapping["builder_name_mapping"][name] = builder_cls + return builder_cls + + return wrap + + @classmethod + def register_task(cls, name): + r"""Register a task to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(task_cls): + from minigpt4.tasks.base_task import BaseTask + + assert issubclass( + task_cls, BaseTask + ), "All tasks must inherit BaseTask class" + if name in cls.mapping["task_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["task_name_mapping"][name] + ) + ) + cls.mapping["task_name_mapping"][name] = task_cls + return task_cls + + return wrap + + @classmethod + def register_model(cls, name): + r"""Register a task to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(model_cls): + from minigpt4.models import BaseModel + + assert issubclass( + model_cls, BaseModel + ), "All models must inherit BaseModel class" + if name in cls.mapping["model_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["model_name_mapping"][name] + ) + ) + cls.mapping["model_name_mapping"][name] = model_cls + return model_cls + + return wrap + + @classmethod + def register_processor(cls, name): + r"""Register a processor to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(processor_cls): + from minigpt4.processors import BaseProcessor + + assert issubclass( + processor_cls, BaseProcessor + ), "All processors must inherit BaseProcessor class" + if name in cls.mapping["processor_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["processor_name_mapping"][name] + ) + ) + cls.mapping["processor_name_mapping"][name] = processor_cls + return processor_cls + + return wrap + + @classmethod + def register_lr_scheduler(cls, name): + r"""Register a model to registry with key 'name' + + Args: + name: Key with which the task will be registered. 
+ + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(lr_sched_cls): + if name in cls.mapping["lr_scheduler_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["lr_scheduler_name_mapping"][name] + ) + ) + cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls + return lr_sched_cls + + return wrap + + @classmethod + def register_runner(cls, name): + r"""Register a model to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(runner_cls): + if name in cls.mapping["runner_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["runner_name_mapping"][name] + ) + ) + cls.mapping["runner_name_mapping"][name] = runner_cls + return runner_cls + + return wrap + + @classmethod + def register_path(cls, name, path): + r"""Register a path to registry with key 'name' + + Args: + name: Key with which the path will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + assert isinstance(path, str), "All path must be str." + if name in cls.mapping["paths"]: + raise KeyError("Name '{}' already registered.".format(name)) + cls.mapping["paths"][name] = path + + @classmethod + def register(cls, name, obj): + r"""Register an item to registry with key 'name' + + Args: + name: Key with which the item will be registered. + + Usage:: + + from minigpt4.common.registry import registry + + registry.register("config", {}) + """ + path = name.split(".") + current = cls.mapping["state"] + + for part in path[:-1]: + if part not in current: + current[part] = {} + current = current[part] + + current[path[-1]] = obj + + # @classmethod + # def get_trainer_class(cls, name): + # return cls.mapping["trainer_name_mapping"].get(name, None) + + @classmethod + def get_builder_class(cls, name): + return cls.mapping["builder_name_mapping"].get(name, None) + + @classmethod + def get_model_class(cls, name): + return cls.mapping["model_name_mapping"].get(name, None) + + @classmethod + def get_task_class(cls, name): + return cls.mapping["task_name_mapping"].get(name, None) + + @classmethod + def get_processor_class(cls, name): + return cls.mapping["processor_name_mapping"].get(name, None) + + @classmethod + def get_lr_scheduler_class(cls, name): + return cls.mapping["lr_scheduler_name_mapping"].get(name, None) + + @classmethod + def get_runner_class(cls, name): + return cls.mapping["runner_name_mapping"].get(name, None) + + @classmethod + def list_runners(cls): + return sorted(cls.mapping["runner_name_mapping"].keys()) + + @classmethod + def list_models(cls): + return sorted(cls.mapping["model_name_mapping"].keys()) + + @classmethod + def list_tasks(cls): + return sorted(cls.mapping["task_name_mapping"].keys()) + + @classmethod + def list_processors(cls): + return sorted(cls.mapping["processor_name_mapping"].keys()) + + @classmethod + def list_lr_schedulers(cls): + return sorted(cls.mapping["lr_scheduler_name_mapping"].keys()) + + @classmethod + def list_datasets(cls): + return sorted(cls.mapping["builder_name_mapping"].keys()) + + @classmethod + def get_path(cls, name): + return cls.mapping["paths"].get(name, None) + + @classmethod + def get(cls, name, default=None, no_warning=False): + r"""Get an item from registry with key 'name' + + Args: + name (string): Key whose value needs to be retrieved. 
+ default: If passed and key is not in registry, default value will + be returned with a warning. Default: None + no_warning (bool): If passed as True, warning when key doesn't exist + will not be generated. Useful for MMF's + internal operations. Default: False + """ + original_name = name + name = name.split(".") + value = cls.mapping["state"] + for subname in name: + value = value.get(subname, default) + if value is default: + break + + if ( + "writer" in cls.mapping["state"] + and value == default + and no_warning is False + ): + cls.mapping["state"]["writer"].warning( + "Key {} is not present in registry, returning default value " + "of {}".format(original_name, default) + ) + return value + + @classmethod + def unregister(cls, name): + r"""Remove an item from registry with key 'name' + + Args: + name: Key which needs to be removed. + Usage:: + + from mmf.common.registry import registry + + config = registry.unregister("config") + """ + return cls.mapping["state"].pop(name, None) + + +registry = Registry() diff --git a/minigpt4/common/utils.py b/minigpt4/common/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a3069cd10ce986a1ec249490fa813cae9254bd0d --- /dev/null +++ b/minigpt4/common/utils.py @@ -0,0 +1,424 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import io +import json +import logging +import os +import pickle +import re +import shutil +import urllib +import urllib.error +import urllib.request +from typing import Optional +from urllib.parse import urlparse + +import numpy as np +import pandas as pd +import yaml +from iopath.common.download import download +from iopath.common.file_io import file_lock, g_pathmgr +from minigpt4.common.registry import registry +from torch.utils.model_zoo import tqdm +from torchvision.datasets.utils import ( + check_integrity, + download_file_from_google_drive, + extract_archive, +) + + +def now(): + from datetime import datetime + + return datetime.now().strftime("%Y%m%d%H%M")[:-1] + + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + + +def get_cache_path(rel_path): + return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path)) + + +def get_abs_path(rel_path): + return os.path.join(registry.get_path("library_root"), rel_path) + + +def load_json(filename): + with open(filename, "r") as f: + return json.load(f) + + +# The following are adapted from torchvision and vissl +# torchvision: https://github.com/pytorch/vision +# vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py + + +def makedir(dir_path): + """ + Create the directory if it does not exist. 
+ """ + is_success = False + try: + if not g_pathmgr.exists(dir_path): + g_pathmgr.mkdirs(dir_path) + is_success = True + except BaseException: + print(f"Error creating directory: {dir_path}") + return is_success + + +def get_redirected_url(url: str): + """ + Given a URL, returns the URL it redirects to or the + original URL in case of no indirection + """ + import requests + + with requests.Session() as session: + with session.get(url, stream=True, allow_redirects=True) as response: + if response.history: + return response.url + else: + return url + + +def to_google_drive_download_url(view_url: str) -> str: + """ + Utility function to transform a view URL of google drive + to a download URL for google drive + Example input: + https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view + Example output: + https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp + """ + splits = view_url.split("/") + assert splits[-1] == "view" + file_id = splits[-2] + return f"https://drive.google.com/uc?export=download&id={file_id}" + + +def download_google_drive_url(url: str, output_path: str, output_file_name: str): + """ + Download a file from google drive + Downloading an URL from google drive requires confirmation when + the file of the size is too big (google drive notifies that + anti-viral checks cannot be performed on such files) + """ + import requests + + with requests.Session() as session: + + # First get the confirmation token and append it to the URL + with session.get(url, stream=True, allow_redirects=True) as response: + for k, v in response.cookies.items(): + if k.startswith("download_warning"): + url = url + "&confirm=" + v + + # Then download the content of the file + with session.get(url, stream=True, verify=True) as response: + makedir(output_path) + path = os.path.join(output_path, output_file_name) + total_size = int(response.headers.get("Content-length", 0)) + with open(path, "wb") as file: + from tqdm import tqdm + + with tqdm(total=total_size) as progress_bar: + for block in response.iter_content( + chunk_size=io.DEFAULT_BUFFER_SIZE + ): + file.write(block) + progress_bar.update(len(block)) + + +def _get_google_drive_file_id(url: str) -> Optional[str]: + parts = urlparse(url) + + if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None: + return None + + match = re.match(r"/file/d/(?P[^/]*)", parts.path) + if match is None: + return None + + return match.group("id") + + +def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None: + with open(filename, "wb") as fh: + with urllib.request.urlopen( + urllib.request.Request(url, headers={"User-Agent": "vissl"}) + ) as response: + with tqdm(total=response.length) as pbar: + for chunk in iter(lambda: response.read(chunk_size), ""): + if not chunk: + break + pbar.update(chunk_size) + fh.write(chunk) + + +def download_url( + url: str, + root: str, + filename: Optional[str] = None, + md5: Optional[str] = None, +) -> None: + """Download a file from a url and place it in root. + Args: + url (str): URL to download file from + root (str): Directory to place downloaded file in + filename (str, optional): Name to save the file under. + If None, use the basename of the URL. + md5 (str, optional): MD5 checksum of the download. 
If None, do not check + """ + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + makedir(root) + + # check if file is already present locally + if check_integrity(fpath, md5): + print("Using downloaded and verified file: " + fpath) + return + + # expand redirect chain if needed + url = get_redirected_url(url) + + # check if file is located on Google Drive + file_id = _get_google_drive_file_id(url) + if file_id is not None: + return download_file_from_google_drive(file_id, root, filename, md5) + + # download the file + try: + print("Downloading " + url + " to " + fpath) + _urlretrieve(url, fpath) + except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined] + if url[:5] == "https": + url = url.replace("https:", "http:") + print( + "Failed download. Trying https -> http instead." + " Downloading " + url + " to " + fpath + ) + _urlretrieve(url, fpath) + else: + raise e + + # check integrity of downloaded file + if not check_integrity(fpath, md5): + raise RuntimeError("File not found or corrupted.") + + +def download_and_extract_archive( + url: str, + download_root: str, + extract_root: Optional[str] = None, + filename: Optional[str] = None, + md5: Optional[str] = None, + remove_finished: bool = False, +) -> None: + download_root = os.path.expanduser(download_root) + if extract_root is None: + extract_root = download_root + if not filename: + filename = os.path.basename(url) + + download_url(url, download_root, filename, md5) + + archive = os.path.join(download_root, filename) + print("Extracting {} to {}".format(archive, extract_root)) + extract_archive(archive, extract_root, remove_finished) + + +def cache_url(url: str, cache_dir: str) -> str: + """ + This implementation downloads the remote resource and caches it locally. + The resource will only be downloaded if not previously requested. + """ + parsed_url = urlparse(url) + dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/"))) + makedir(dirname) + filename = url.split("/")[-1] + cached = os.path.join(dirname, filename) + with file_lock(cached): + if not os.path.isfile(cached): + logging.info(f"Downloading {url} to {cached} ...") + cached = download(url, dirname, filename=filename) + logging.info(f"URL {url} cached in {cached}") + return cached + + +# TODO (prigoyal): convert this into RAII-style API +def create_file_symlink(file1, file2): + """ + Simply create the symlinks for a given file1 to file2. + Useful during model checkpointing to symlinks to the + latest successful checkpoint. + """ + try: + if g_pathmgr.exists(file2): + g_pathmgr.rm(file2) + g_pathmgr.symlink(file1, file2) + except Exception as e: + logging.info(f"Could NOT create symlink. Error: {e}") + + +def save_file(data, filename, append_to_json=True, verbose=True): + """ + Common i/o utility to handle saving data to various file formats. + Supported: + .pkl, .pickle, .npy, .json + Specifically for .json, users have the option to either append (default) + or rewrite by passing in Boolean value to append_to_json. 
+ """ + if verbose: + logging.info(f"Saving data to file: {filename}") + file_ext = os.path.splitext(filename)[1] + if file_ext in [".pkl", ".pickle"]: + with g_pathmgr.open(filename, "wb") as fopen: + pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL) + elif file_ext == ".npy": + with g_pathmgr.open(filename, "wb") as fopen: + np.save(fopen, data) + elif file_ext == ".json": + if append_to_json: + with g_pathmgr.open(filename, "a") as fopen: + fopen.write(json.dumps(data, sort_keys=True) + "\n") + fopen.flush() + else: + with g_pathmgr.open(filename, "w") as fopen: + fopen.write(json.dumps(data, sort_keys=True) + "\n") + fopen.flush() + elif file_ext == ".yaml": + with g_pathmgr.open(filename, "w") as fopen: + dump = yaml.dump(data) + fopen.write(dump) + fopen.flush() + else: + raise Exception(f"Saving {file_ext} is not supported yet") + + if verbose: + logging.info(f"Saved data to file: {filename}") + + +def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False): + """ + Common i/o utility to handle loading data from various file formats. + Supported: + .pkl, .pickle, .npy, .json + For the npy files, we support reading the files in mmap_mode. + If the mmap_mode of reading is not successful, we load data without the + mmap_mode. + """ + if verbose: + logging.info(f"Loading data from file: {filename}") + + file_ext = os.path.splitext(filename)[1] + if file_ext == ".txt": + with g_pathmgr.open(filename, "r") as fopen: + data = fopen.readlines() + elif file_ext in [".pkl", ".pickle"]: + with g_pathmgr.open(filename, "rb") as fopen: + data = pickle.load(fopen, encoding="latin1") + elif file_ext == ".npy": + if mmap_mode: + try: + with g_pathmgr.open(filename, "rb") as fopen: + data = np.load( + fopen, + allow_pickle=allow_pickle, + encoding="latin1", + mmap_mode=mmap_mode, + ) + except ValueError as e: + logging.info( + f"Could not mmap {filename}: {e}. Trying without g_pathmgr" + ) + data = np.load( + filename, + allow_pickle=allow_pickle, + encoding="latin1", + mmap_mode=mmap_mode, + ) + logging.info("Successfully loaded without g_pathmgr") + except Exception: + logging.info("Could not mmap without g_pathmgr. Trying without mmap") + with g_pathmgr.open(filename, "rb") as fopen: + data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") + else: + with g_pathmgr.open(filename, "rb") as fopen: + data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") + elif file_ext == ".json": + with g_pathmgr.open(filename, "r") as fopen: + data = json.load(fopen) + elif file_ext == ".yaml": + with g_pathmgr.open(filename, "r") as fopen: + data = yaml.load(fopen, Loader=yaml.FullLoader) + elif file_ext == ".csv": + with g_pathmgr.open(filename, "r") as fopen: + data = pd.read_csv(fopen) + else: + raise Exception(f"Reading from {file_ext} is not supported yet") + return data + + +def abspath(resource_path: str): + """ + Make a path absolute, but take into account prefixes like + "http://" or "manifold://" + """ + regex = re.compile(r"^\w+://") + if regex.match(resource_path) is None: + return os.path.abspath(resource_path) + else: + return resource_path + + +def makedir(dir_path): + """ + Create the directory if it does not exist. + """ + is_success = False + try: + if not g_pathmgr.exists(dir_path): + g_pathmgr.mkdirs(dir_path) + is_success = True + except BaseException: + logging.info(f"Error creating directory: {dir_path}") + return is_success + + +def is_url(input_url): + """ + Check if an input string is a url. 
looks for http(s)://, ignoring case
+    """
+    is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None
+    return is_url
+
+
+def cleanup_dir(dir):
+    """
+    Utility for deleting a directory. Useful for cleaning the storage space
+    that contains various training artifacts like checkpoints, data etc.
+    """
+    if os.path.exists(dir):
+        logging.info(f"Deleting directory: {dir}")
+        shutil.rmtree(dir)
+    logging.info(f"Deleted contents of directory: {dir}")
+
+
+def get_file_size(filename):
+    """
+    Given a file, get the size of the file in MB
+    """
+    size_in_mb = os.path.getsize(filename) / float(1024**2)
+    return size_in_mb
diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py
new file mode 100644
index 0000000000000000000000000000000000000000..07ca21d805684d71593c8d738798822411bdecc6
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py
@@ -0,0 +1,89 @@
+# coding: utf-8
+
+import sys
+dataDir = '../../VQA'
+sys.path.insert(0, '%s/PythonHelperTools/vqaTools' % (dataDir))
+from vqa import VQA
+from vqaEvaluation.vqaEval import VQAEval
+import matplotlib.pyplot as plt
+import skimage.io as io
+import json
+import random
+import os
+
+# set up file names and paths
+versionType = 'v2_'  # this should be '' when using the VQA v1.0 dataset
+taskType = 'OpenEnded'  # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
+dataType = 'mscoco'  # 'mscoco' only for v2.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
+dataSubType = 'train2014'
+annFile = '%s/Annotations/%s%s_%s_annotations.json' % (dataDir, versionType, dataType, dataSubType)
+quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (dataDir, versionType, taskType, dataType, dataSubType)
+imgDir = '%s/Images/%s/%s/' % (dataDir, dataType, dataSubType)
+resultType = 'fake'
+fileTypes = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']
+
+# An example result json file has been provided in './Results' folder.
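+# (Note, not part of the original demo: VQA.loadRes() used below assumes the
+# results file is a JSON array with one object per question, e.g.
+#   [{"question_id": 1, "answer": "yes"}, ...]
+# the id and answer shown here are illustrative only.)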
+
+[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s/Results/%s%s_%s_%s_%s_%s.json'%(dataDir, versionType, taskType, dataType, dataSubType, \
+resultType, fileType) for fileType in fileTypes]
+
+# create vqa object and vqaRes object
+vqa = VQA(annFile, quesFile)
+vqaRes = vqa.loadRes(resFile, quesFile)
+
+# create vqaEval object by taking vqa and vqaRes
+vqaEval = VQAEval(vqa, vqaRes, n=2)  # n is precision of accuracy (number of places after decimal), default is 2
+
+# evaluate results
+"""
+If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
+By default it uses all the question ids in annotation file
+"""
+vqaEval.evaluate()
+
+# print accuracies
+print("\n")
+print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
+print("Per Question Type Accuracy is the following:")
+for quesType in vqaEval.accuracy['perQuestionType']:
+    print("%s : %.02f" % (quesType, vqaEval.accuracy['perQuestionType'][quesType]))
+print("\n")
+print("Per Answer Type Accuracy is the following:")
+for ansType in vqaEval.accuracy['perAnswerType']:
+    print("%s : %.02f" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
+print("\n")
+# demo how to use evalQA to retrieve low score result
+evals = [quesId for quesId in vqaEval.evalQA if vqaEval.evalQA[quesId] < 35]  # 35 is per question percentage accuracy
+if len(evals) > 0:
+    print('ground truth answers')
+    randomEval = random.choice(evals)
+    randomAnn = vqa.loadQA(randomEval)
+    vqa.showQA(randomAnn)
+
+    print('\n')
+    print('generated answer (accuracy %.02f)' % (vqaEval.evalQA[randomEval]))
+    ann = vqaRes.loadQA(randomEval)[0]
+    print("Answer: %s\n" % (ann['answer']))
+
+    imgId = randomAnn[0]['image_id']
+    imgFilename = 'COCO_' + dataSubType + '_' + str(imgId).zfill(12) + '.jpg'
+    if os.path.isfile(imgDir + imgFilename):
+        I = io.imread(imgDir + imgFilename)
+        plt.imshow(I)
+        plt.axis('off')
+        plt.show()
+
+# plot accuracy for various question types
+plt.bar(range(len(vqaEval.accuracy['perQuestionType'])), list(vqaEval.accuracy['perQuestionType'].values()), align='center')
+plt.xticks(range(len(vqaEval.accuracy['perQuestionType'])), list(vqaEval.accuracy['perQuestionType'].keys()), rotation='0', fontsize=10)
+plt.title('Per Question Type Accuracy', fontsize=10)
+plt.xlabel('Question Types', fontsize=10)
+plt.ylabel('Accuracy', fontsize=10)
+plt.show()
+
+# save evaluation results to ./Results folder
+json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
+json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
+json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
+json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
+
diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..148424d7391f6c8e8070f6dd20f02e2ddb1899cc
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
@@ -0,0 +1 @@
+__author__ = 'aagrawal'
diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a656044433b08c3b3a7610e0d4f701c9f3f752a
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
@@ -0,0 +1,192 @@
+# coding=utf-8
+
+__author__ = 'aagrawal'
+
+import re
+# This code is based on
the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py). +import sys + + +class VQAEval: + def __init__(self, vqa, vqaRes, n=2): + self.n = n + self.accuracy = {} + self.evalQA = {} + self.evalQuesType = {} + self.evalAnsType = {} + self.vqa = vqa + self.vqaRes = vqaRes + self.params = {'question_id': vqa.getQuesIds()} + self.contractions = {"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", \ + "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", \ + "hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", \ + "he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", \ + "Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", \ + "maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", \ + "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", \ + "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", \ + "she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", \ + "somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", \ + "somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", \ + "someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", \ + "something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", \ + "there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", \ + "they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", \ + "wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", \ + "whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", \ + "whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", \ + "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", \ + "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", \ + "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", \ + "youll": "you'll", "youre": "you're", "youve": "you've"} + self.manualMap = { 'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10' + } + self.articles = ['a', + 'an', + 'the' + ] + + + self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") + self.commaStrip = re.compile("(\d)(\,)(\d)") + 
self.punct = [';', r"/", '[', ']', '"', '{', '}', + '(', ')', '=', '+', '\\', '_', '-', + '>', '<', '@', '`', ',', '?', '!'] + + + def evaluate(self, quesIds=None): + if quesIds == None: + quesIds = [quesId for quesId in self.params['question_id']] + gts = {} + res = {} + for quesId in quesIds: + gts[quesId] = self.vqa.qa[quesId] + res[quesId] = self.vqaRes.qa[quesId] + + # ================================================= + # Compute accuracy + # ================================================= + accQA = [] + accQuesType = {} + accAnsType = {} + # print "computing accuracy" + step = 0 + for quesId in quesIds: + for ansDic in gts[quesId]['answers']: + ansDic['answer'] = ansDic['answer'].replace('\n', ' ') + ansDic['answer'] = ansDic['answer'].replace('\t', ' ') + ansDic['answer'] = ansDic['answer'].strip() + resAns = res[quesId]['answer'] + resAns = resAns.replace('\n', ' ') + resAns = resAns.replace('\t', ' ') + resAns = resAns.strip() + gtAcc = [] + gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']] + + if len(set(gtAnswers)) > 1: + for ansDic in gts[quesId]['answers']: + ansDic['answer'] = self.processPunctuation(ansDic['answer']) + ansDic['answer'] = self.processDigitArticle(ansDic['answer']) + resAns = self.processPunctuation(resAns) + resAns = self.processDigitArticle(resAns) + + for gtAnsDatum in gts[quesId]['answers']: + otherGTAns = [item for item in gts[quesId]['answers'] if item!=gtAnsDatum] + matchingAns = [item for item in otherGTAns if item['answer'].lower()==resAns.lower()] + acc = min(1, float(len(matchingAns))/3) + gtAcc.append(acc) + quesType = gts[quesId]['question_type'] + ansType = gts[quesId]['answer_type'] + avgGTAcc = float(sum(gtAcc))/len(gtAcc) + accQA.append(avgGTAcc) + if quesType not in accQuesType: + accQuesType[quesType] = [] + accQuesType[quesType].append(avgGTAcc) + if ansType not in accAnsType: + accAnsType[ansType] = [] + accAnsType[ansType].append(avgGTAcc) + self.setEvalQA(quesId, avgGTAcc) + self.setEvalQuesType(quesId, quesType, avgGTAcc) + self.setEvalAnsType(quesId, ansType, avgGTAcc) + if step%100 == 0: + self.updateProgress(step/float(len(quesIds))) + step = step + 1 + + self.setAccuracy(accQA, accQuesType, accAnsType) + # print "Done computing accuracy" + + def processPunctuation(self, inText): + outText = inText + for p in self.punct: + if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None): + outText = outText.replace(p, '') + else: + outText = outText.replace(p, ' ') + outText = self.periodStrip.sub("", + outText, + re.UNICODE) + return outText + + def processDigitArticle(self, inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = self.manualMap.setdefault(word, word) + if word not in self.articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in self.contractions: + outText[wordId] = self.contractions[word] + outText = ' '.join(outText) + return outText + + def setAccuracy(self, accQA, accQuesType, accAnsType): + self.accuracy['overall'] = round(100*float(sum(accQA))/len(accQA), self.n) + self.accuracy['perQuestionType'] = {quesType: round(100*float(sum(accQuesType[quesType]))/len(accQuesType[quesType]), self.n) for quesType in accQuesType} + self.accuracy['perAnswerType'] = {ansType: round(100*float(sum(accAnsType[ansType]))/len(accAnsType[ansType]), self.n) for ansType in accAnsType} + + def setEvalQA(self, quesId, acc): + self.evalQA[quesId] = round(100*acc, self.n) + + def setEvalQuesType(self, quesId, 
quesType, acc):
+        if quesType not in self.evalQuesType:
+            self.evalQuesType[quesType] = {}
+        self.evalQuesType[quesType][quesId] = round(100*acc, self.n)
+
+    def setEvalAnsType(self, quesId, ansType, acc):
+        if ansType not in self.evalAnsType:
+            self.evalAnsType[ansType] = {}
+        self.evalAnsType[ansType][quesId] = round(100*acc, self.n)
+
+    def updateProgress(self, progress):
+        barLength = 20
+        status = ""
+        if isinstance(progress, int):
+            progress = float(progress)
+        if not isinstance(progress, float):
+            progress = 0
+            status = "error: progress var must be float\r\n"
+        if progress < 0:
+            progress = 0
+            status = "Halt...\r\n"
+        if progress >= 1:
+            progress = 1
+            status = "Done...\r\n"
+        block = int(round(barLength*progress))
+        text = "\rFinished Percent: [{0}] {1}% {2}".format("#"*block + "-"*(barLength-block), int(progress*100), status)
+        sys.stdout.write(text)
+        sys.stdout.flush()
diff --git a/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py
new file mode 100644
index 0000000000000000000000000000000000000000..406b59642a7c2c208b87b0222a299e48a5831eb1
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+
+from vqaTools.vqa import VQA
+import random
+import skimage.io as io
+import matplotlib.pyplot as plt
+import os
+
+dataDir = '../../VQA'
+versionType = 'v2_'  # this should be '' when using the VQA v1.0 dataset
+taskType = 'OpenEnded'  # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
+dataType = 'mscoco'  # 'mscoco' only for v2.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
+dataSubType = 'train2014'
+annFile = '%s/Annotations/%s%s_%s_annotations.json' % (dataDir, versionType, dataType, dataSubType)
+quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (dataDir, versionType, taskType, dataType, dataSubType)
+imgDir = '%s/Images/%s/%s/' % (dataDir, dataType, dataSubType)
+
+# initialize VQA api for QA annotations
+vqa = VQA(annFile, quesFile)
+
+# load and display QA annotations for given question types
+"""
+All possible quesTypes for abstract and mscoco have been provided in respective text files in ../QuestionTypes/ folder.
+"""
+annIds = vqa.getQuesIds(quesTypes='how many')
+anns = vqa.loadQA(annIds)
+randomAnn = random.choice(anns)
+vqa.showQA([randomAnn])
+imgId = randomAnn['image_id']
+imgFilename = 'COCO_' + dataSubType + '_' + str(imgId).zfill(12) + '.jpg'
+if os.path.isfile(imgDir + imgFilename):
+    I = io.imread(imgDir + imgFilename)
+    plt.imshow(I)
+    plt.axis('off')
+    plt.show()
+
+# load and display QA annotations for given answer types
+"""
+ansTypes can be one of the following
+yes/no
+number
+other
+"""
+annIds = vqa.getQuesIds(ansTypes='yes/no')
+anns = vqa.loadQA(annIds)
+randomAnn = random.choice(anns)
+vqa.showQA([randomAnn])
+imgId = randomAnn['image_id']
+imgFilename = 'COCO_' + dataSubType + '_' + str(imgId).zfill(12) + '.jpg'
+if os.path.isfile(imgDir + imgFilename):
+    I = io.imread(imgDir + imgFilename)
+    plt.imshow(I)
+    plt.axis('off')
+    plt.show()
+
+# load and display QA annotations for given images
+"""
+Usage: vqa.getImgIds(quesIds=[], quesTypes=[], ansTypes=[])
+Above method can be used to retrieve imageIds for given question Ids or given question types or given answer types.
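+Example: vqa.getImgIds(quesTypes='how many') would return the ids of images that have a 'how many' question (an illustrative call, not used in this demo).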
+""" +ids = vqa.getImgIds() +annIds = vqa.getQuesIds(imgIds=random.sample(ids,5)); +anns = vqa.loadQA(annIds) +randomAnn = random.choice(anns) +vqa.showQA([randomAnn]) +imgId = randomAnn['image_id'] +imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' +if os.path.isfile(imgDir + imgFilename): + I = io.imread(imgDir + imgFilename) + plt.imshow(I) + plt.axis('off') + plt.show() + diff --git a/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..072d8d90cd261c19c62fa4624ca22471fe72abfd --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py @@ -0,0 +1 @@ +__author__ = 'aagrawal' diff --git a/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..4f769619fc64ce150d1a462d91ea29282f08104a --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py @@ -0,0 +1,179 @@ +__author__ = 'aagrawal' +__version__ = '0.9' + +# Interface for accessing the VQA dataset. + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py). + +# The following functions are defined: +# VQA - VQA class that loads VQA annotation file and prepares data structures. +# getQuesIds - Get question ids that satisfy given filter conditions. +# getImgIds - Get image ids that satisfy given filter conditions. +# loadQA - Load questions and answers with the specified question ids. +# showQA - Display the specified questions and answers. +# loadRes - Load result file and create result object. + +# Help on each function can be accessed by: "help(COCO.function)" + +import json +import datetime +import copy + + +class VQA: + def __init__(self, annotation_file=None, question_file=None): + """ + Constructor of VQA helper class for reading and visualizing questions and answers. + :param annotation_file (str): location of VQA annotation file + :return: + """ + # load dataset + self.dataset = {} + self.questions = {} + self.qa = {} + self.qqa = {} + self.imgToQA = {} + if not annotation_file == None and not question_file == None: + # print 'loading VQA annotations and questions into memory...' + time_t = datetime.datetime.utcnow() + dataset = json.load(open(annotation_file, 'r')) + questions = json.load(open(question_file, 'r')) + # print datetime.datetime.utcnow() - time_t + self.dataset = dataset + self.questions = questions + self.createIndex() + + def createIndex(self): + imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']} + qa = {ann['question_id']: [] for ann in self.dataset['annotations']} + qqa = {ann['question_id']: [] for ann in self.dataset['annotations']} + for ann in self.dataset['annotations']: + imgToQA[ann['image_id']] += [ann] + qa[ann['question_id']] = ann + for ques in self.questions['questions']: + qqa[ques['question_id']] = ques + # print 'index created!' + + # create class members + self.qa = qa + self.qqa = qqa + self.imgToQA = imgToQA + + def info(self): + """ + Print information about the VQA annotation file. 
+ :return: + """ + + # for key, value in self.datset['info'].items(): + # print '%s: %s'%(key, value) + + def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]): + """ + Get question ids that satisfy given filter conditions. default skips that filter + :param imgIds (int array) : get question ids for given imgs + quesTypes (str array) : get question ids for given question types + ansTypes (str array) : get question ids for given answer types + :return: ids (int array) : integer array of question ids + """ + imgIds = imgIds if type(imgIds) == list else [imgIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(imgIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset['annotations'] + else: + if not len(imgIds) == 0: + anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], []) + else: + anns = self.dataset['annotations'] + anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes] + anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes] + ids = [ann['question_id'] for ann in anns] + return ids + + def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]): + """ + Get image ids that satisfy given filter conditions. default skips that filter + :param quesIds (int array) : get image ids for given question ids + quesTypes (str array) : get image ids for given question types + ansTypes (str array) : get image ids for given answer types + :return: ids (int array) : integer array of image ids + """ + quesIds = quesIds if type(quesIds) == list else [quesIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(quesIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset['annotations'] + else: + if not len(quesIds) == 0: + anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa], []) + else: + anns = self.dataset['annotations'] + anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes] + anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes] + ids = [ann['image_id'] for ann in anns] + return ids + + def loadQA(self, ids=[]): + """ + Load questions and answers with the specified question ids. + :param ids (int array) : integer ids specifying question ids + :return: qa (object array) : loaded qa objects + """ + if type(ids) == list: + return [self.qa[id] for id in ids] + elif type(ids) == int: + return [self.qa[ids]] + + def showQA(self, anns): + """ + Display the specified annotations. + :param anns (array of object): annotations to display + :return: None + """ + if len(anns) == 0: + return 0 + for ann in anns: + quesId = ann['question_id'] + print("Question: %s" % (self.qqa[quesId]['question'])) + for ans in ann['answers']: + print("Answer %d: %s" % (ans['answer_id'], ans['answer'])) + + def loadRes(self, resFile, quesFile): + """ + Load result file and return a result object. 
+ :param resFile (str) : file name of result file + :return: res (obj) : result api object + """ + res = VQA() + res.questions = json.load(open(quesFile)) + res.dataset['info'] = copy.deepcopy(self.questions['info']) + res.dataset['task_type'] = copy.deepcopy(self.questions['task_type']) + res.dataset['data_type'] = copy.deepcopy(self.questions['data_type']) + res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype']) + res.dataset['license'] = copy.deepcopy(self.questions['license']) + + # print 'Loading and preparing results... ' + time_t = datetime.datetime.utcnow() + anns = json.load(open(resFile)) + assert type(anns) == list, 'results is not an array of objects' + annsQuesIds = [ann['question_id'] for ann in anns] + assert set(annsQuesIds) == set(self.getQuesIds()), \ + 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.' + for ann in anns: + quesId = ann['question_id'] + if res.dataset['task_type'] == 'Multiple Choice': + assert ann['answer'] in self.qqa[quesId][ + 'multiple_choices'], 'predicted answer is not one of the multiple choices' + qaAnn = self.qa[quesId] + ann['image_id'] = qaAnn['image_id'] + ann['question_type'] = qaAnn['question_type'] + ann['answer_type'] = qaAnn['answer_type'] + # print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds()) + + res.dataset['annotations'] = anns + res.createIndex() + return res diff --git a/minigpt4/common/vqa_tools/VQA/QuestionTypes/abstract_v002_question_types.txt b/minigpt4/common/vqa_tools/VQA/QuestionTypes/abstract_v002_question_types.txt new file mode 100644 index 0000000000000000000000000000000000000000..44304fc865d1fee83ca73a36d3fbe2580cc4b5f9 --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/QuestionTypes/abstract_v002_question_types.txt @@ -0,0 +1,81 @@ +how many +what color is the +is the +where is the +what +what is +are the +what is the +is there a +does the +is the woman +is the man +what is on the +is it +is the girl +is the boy +is the dog +are they +who is +what kind of +what color are the +what is in the +what is the man +is there +what is the woman +what are the +what is the boy +are there +what is the girl +is this +how +which +how many people are +is the cat +why is the +are +will the +what type of +what is the dog +do +is she +does +do the +is +is the baby +are there any +is the lady +can +what animal is +where are the +is the sun +what are they +did the +what is the cat +what is the lady +how many clouds are +is that +is the little girl +is he +are these +how many trees are +how many pillows +are the people +why +is the young +how many windows are +is this a +what is the little +is the tv +how many animals are +who +how many pictures +how many plants are +how many birds are +what color is +what is the baby +is anyone +what color +how many bushes +is the old man +none of the above diff --git a/minigpt4/common/vqa_tools/VQA/QuestionTypes/mscoco_question_types.txt b/minigpt4/common/vqa_tools/VQA/QuestionTypes/mscoco_question_types.txt new file mode 100644 index 0000000000000000000000000000000000000000..95590506bf8af7ba1eaeb91746b43da0eb9b4baa --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/QuestionTypes/mscoco_question_types.txt @@ -0,0 +1,65 @@ +how many +is the +what +what color is the +what is the +is this +is this a +what is +are the +what kind of +is there a +what type of +is it +what are the +where 
is the +is there +does the +what color are the +are these +are there +which +is +what is the man +is the man +are +how +does this +what is on the +what does the +how many people are +what is in the +what is this +do +what are +are they +what time +what sport is +are there any +is he +what color is +why +where are the +what color +who is +what animal is +is the woman +is this an +do you +how many people are in +what room is +has +is this person +what is the woman +can you +why is the +is the person +what is the color of the +what is the person +could +was +is that a +what number is +what is the name +what brand +none of the above diff --git a/minigpt4/common/vqa_tools/VQA/README.md b/minigpt4/common/vqa_tools/VQA/README.md new file mode 100644 index 0000000000000000000000000000000000000000..439d59d4d7c761423ab7016ab8768105b2df6c35 --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/README.md @@ -0,0 +1,80 @@ +Python API and Evaluation Code for v2.0 and v1.0 releases of the VQA dataset. +=================== +## VQA v2.0 release ## +This release consists of +- Real + - 82,783 MS COCO training images, 40,504 MS COCO validation images and 81,434 MS COCO testing images (images are obtained from [MS COCO website] (http://mscoco.org/dataset/#download)) + - 443,757 questions for training, 214,354 questions for validation and 447,793 questions for testing + - 4,437,570 answers for training and 2,143,540 answers for validation (10 per question) + +There is only one type of task +- Open-ended task + +## VQA v1.0 release ## +This release consists of +- Real + - 82,783 MS COCO training images, 40,504 MS COCO validation images and 81,434 MS COCO testing images (images are obtained from [MS COCO website] (http://mscoco.org/dataset/#download)) + - 248,349 questions for training, 121,512 questions for validation and 244,302 questions for testing (3 per image) + - 2,483,490 answers for training and 1,215,120 answers for validation (10 per question) +- Abstract + - 20,000 training images, 10,000 validation images and 20,000 MS COCO testing images + - 60,000 questions for training, 30,000 questions for validation and 60,000 questions for testing (3 per image) + - 600,000 answers for training and 300,000 answers for validation (10 per question) + +There are two types of tasks +- Open-ended task +- Multiple-choice task (18 choices per question) + +## Requirements ## +- python 2.7 +- scikit-image (visit [this page](http://scikit-image.org/docs/dev/install.html) for installation) +- matplotlib (visit [this page](http://matplotlib.org/users/installing.html) for installation) + +## Files ## +./Questions +- For v2.0, download the question files from the [VQA download page](http://www.visualqa.org/download.html), extract them and place in this folder. +- For v1.0, both real and abstract, question files can be found on the [VQA v1 download page](http://www.visualqa.org/vqa_v1_download.html). +- Question files from Beta v0.9 release (123,287 MSCOCO train and val images, 369,861 questions, 3,698,610 answers) can be found below + - [training question files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Questions_Train_mscoco.zip) + - [validation question files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Questions_Val_mscoco.zip) +- Question files from Beta v0.1 release (10k MSCOCO images, 30k questions, 300k answers) can be found [here](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.1/Questions_Train_mscoco.zip). 
+ +./Annotations +- For v2.0, download the annotations files from the [VQA download page](http://www.visualqa.org/download.html), extract them and place in this folder. +- For v1.0, for both real and abstract, annotation files can be found on the [VQA v1 download page](http://www.visualqa.org/vqa_v1_download.html). +- Annotation files from Beta v0.9 release (123,287 MSCOCO train and val images, 369,861 questions, 3,698,610 answers) can be found below + - [training annotation files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Annotations_Train_mscoco.zip) + - [validation annotation files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Annotations_Val_mscoco.zip) +- Annotation files from Beta v0.1 release (10k MSCOCO images, 30k questions, 300k answers) can be found [here](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.1/Annotations_Train_mscoco.zip). + +./Images +- For real, create a directory with name mscoco inside this directory. For each of train, val and test, create directories with names train2014, val2014 and test2015 respectively inside mscoco directory, download respective images from [MS COCO website](http://mscoco.org/dataset/#download) and place them in respective folders. +- For abstract, create a directory with name abstract_v002 inside this directory. For each of train, val and test, create directories with names train2015, val2015 and test2015 respectively inside abstract_v002 directory, download respective images from [VQA download page](http://www.visualqa.org/download.html) and place them in respective folders. + +./PythonHelperTools +- This directory contains the Python API to read and visualize the VQA dataset +- vqaDemo.py (demo script) +- vqaTools (API to read and visualize data) + +./PythonEvaluationTools +- This directory contains the Python evaluation code +- vqaEvalDemo.py (evaluation demo script) +- vqaEvaluation (evaluation code) + +./Results +- OpenEnded_mscoco_train2014_fake_results.json (an example of a fake results file for v1.0 to run the demo) +- Visit [VQA evaluation page] (http://visualqa.org/evaluation) for more details. + +./QuestionTypes +- This directory contains the following lists of question types for both real and abstract questions (question types are unchanged from v1.0 to v2.0). In a list, if there are question types of length n+k and length n with the same first n words, then the question type of length n does not include questions that belong to the question type of length n+k. +- mscoco_question_types.txt +- abstract_v002_question_types.txt + +## References ## +- [VQA: Visual Question Answering](http://visualqa.org/) +- [Microsoft COCO](http://mscoco.org/) + +## Developers ## +- Aishwarya Agrawal (Virginia Tech) +- Code for API is based on [MSCOCO API code](https://github.com/pdollar/coco). +- The format of the code for evaluation is based on [MSCOCO evaluation code](https://github.com/tylin/coco-caption). diff --git a/minigpt4/common/vqa_tools/VQA/license.txt b/minigpt4/common/vqa_tools/VQA/license.txt new file mode 100644 index 0000000000000000000000000000000000000000..f87c06bb4f439b09dec29988b9b23c5995d0e7d4 --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/license.txt @@ -0,0 +1,30 @@ +Copyright (c) 2014, Aishwarya Agrawal +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are +those +of the authors and should not be interpreted as representing official +policies, +either expressed or implied, of the FreeBSD Project. diff --git a/minigpt4/common/vqa_tools/__init__.py b/minigpt4/common/vqa_tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b98da85428159ad0dcfab7685c080848ecf8c7b --- /dev/null +++ b/minigpt4/common/vqa_tools/__init__.py @@ -0,0 +1,8 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +__author__ = "aagrawal" diff --git a/minigpt4/common/vqa_tools/vqa.py b/minigpt4/common/vqa_tools/vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..a386b9094b0528b33e7511aff4027f30459a7ff7 --- /dev/null +++ b/minigpt4/common/vqa_tools/vqa.py @@ -0,0 +1,211 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +__author__ = "aagrawal" +__version__ = "0.9" + +# Interface for accessing the VQA dataset. + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py). + +# The following functions are defined: +# VQA - VQA class that loads VQA annotation file and prepares data structures. +# getQuesIds - Get question ids that satisfy given filter conditions. +# getImgIds - Get image ids that satisfy given filter conditions. +# loadQA - Load questions and answers with the specified question ids. +# showQA - Display the specified questions and answers. +# loadRes - Load result file and create result object. + +# Help on each function can be accessed by: "help(COCO.function)" + +import json +import datetime +import copy + + +class VQA: + def __init__(self, annotation_file=None, question_file=None): + """ + Constructor of VQA helper class for reading and visualizing questions and answers. 
+        :param annotation_file (str): location of VQA annotation file
+        :return:
+        """
+        # load dataset
+        self.dataset = {}
+        self.questions = {}
+        self.qa = {}
+        self.qqa = {}
+        self.imgToQA = {}
+        if annotation_file is not None and question_file is not None:
+            print("loading VQA annotations and questions into memory...")
+            time_t = datetime.datetime.utcnow()
+            dataset = json.load(open(annotation_file, "r"))
+            questions = json.load(open(question_file, "r"))
+            self.dataset = dataset
+            self.questions = questions
+            self.createIndex()
+
+    def createIndex(self):
+        # create index
+        print("creating index...")
+        imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]}
+        qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
+        qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
+        for ann in self.dataset["annotations"]:
+            imgToQA[ann["image_id"]] += [ann]
+            qa[ann["question_id"]] = ann
+        for ques in self.questions["questions"]:
+            qqa[ques["question_id"]] = ques
+        print("index created!")
+
+        # create class members
+        self.qa = qa
+        self.qqa = qqa
+        self.imgToQA = imgToQA
+
+    def info(self):
+        """
+        Print information about the VQA annotation file.
+        :return:
+        """
+        for key, value in self.dataset["info"].items():
+            print("%s: %s" % (key, value))
+
+    def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
+        """
+        Get question ids that satisfy given filter conditions. default skips that filter
+        :param imgIds (int array) : get question ids for given imgs
+               quesTypes (str array) : get question ids for given question types
+               ansTypes (str array) : get question ids for given answer types
+        :return: ids (int array) : integer array of question ids
+        """
+        imgIds = imgIds if type(imgIds) == list else [imgIds]
+        quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
+        ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
+
+        if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
+            anns = self.dataset["annotations"]
+        else:
+            if not len(imgIds) == 0:
+                anns = sum(
+                    [self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],
+                    [],
+                )
+            else:
+                anns = self.dataset["annotations"]
+            anns = (
+                anns
+                if len(quesTypes) == 0
+                else [ann for ann in anns if ann["question_type"] in quesTypes]
+            )
+            anns = (
+                anns
+                if len(ansTypes) == 0
+                else [ann for ann in anns if ann["answer_type"] in ansTypes]
+            )
+        ids = [ann["question_id"] for ann in anns]
+        return ids
+
+    def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
+        """
+        Get image ids that satisfy given filter conditions. default skips that filter
+        :param quesIds (int array) : get image ids for given question ids
+               quesTypes (str array) : get image ids for given question types
+               ansTypes (str array) : get image ids for given answer types
+        :return: ids (int array) : integer array of image ids
+        """
+        quesIds = quesIds if type(quesIds) == list else [quesIds]
+        quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
+        ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
+
+        if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
+            anns = self.dataset["annotations"]
+        else:
+            if not len(quesIds) == 0:
+                anns = sum(
+                    [self.qa[quesId] for quesId in quesIds if quesId in self.qa], []
+                )
+            else:
+                anns = self.dataset["annotations"]
+            anns = (
+                anns
+                if len(quesTypes) == 0
+                else [ann for ann in anns if ann["question_type"] in quesTypes]
+            )
+            anns = (
+                anns
+                if len(ansTypes) == 0
+                else [ann for ann in anns if ann["answer_type"] in ansTypes]
+            )
+        ids = [ann["image_id"] for ann in anns]
+        return ids
+
+    def loadQA(self, ids=[]):
+        """
+        Load questions and answers with the specified question ids.
+        :param ids (int array) : integer ids specifying question ids
+        :return: qa (object array) : loaded qa objects
+        """
+        if type(ids) == list:
+            return [self.qa[id] for id in ids]
+        elif type(ids) == int:
+            return [self.qa[ids]]
+
+    def showQA(self, anns):
+        """
+        Display the specified annotations.
+        :param anns (array of object): annotations to display
+        :return: None
+        """
+        if len(anns) == 0:
+            return 0
+        for ann in anns:
+            quesId = ann["question_id"]
+            print("Question: %s" % (self.qqa[quesId]["question"]))
+            for ans in ann["answers"]:
+                print("Answer %d: %s" % (ans["answer_id"], ans["answer"]))
+
+    def loadRes(self, resFile, quesFile):
+        """
+        Load result file and return a result object.
+        :param resFile (str) : file name of result file
+        :return: res (obj) : result api object
+        """
+        res = VQA()
+        res.questions = json.load(open(quesFile))
+        res.dataset["info"] = copy.deepcopy(self.questions["info"])
+        res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"])
+        res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"])
+        res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"])
+        res.dataset["license"] = copy.deepcopy(self.questions["license"])
+
+        print("Loading and preparing results... ")
+        time_t = datetime.datetime.utcnow()
+        anns = json.load(open(resFile))
+        assert type(anns) == list, "results is not an array of objects"
+        annsQuesIds = [ann["question_id"] for ann in anns]
+        assert set(annsQuesIds) == set(
+            self.getQuesIds()
+        ), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is at least one question id that does not belong to the question ids in the annotation file."
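+        # Copy the image id and the question/answer types over from the matching
+        # ground-truth annotation so each predicted answer can be indexed and
+        # filtered exactly like a real annotation entry.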
+ for ann in anns: + quesId = ann["question_id"] + if res.dataset["task_type"] == "Multiple Choice": + assert ( + ann["answer"] in self.qqa[quesId]["multiple_choices"] + ), "predicted answer is not one of the multiple choices" + qaAnn = self.qa[quesId] + ann["image_id"] = qaAnn["image_id"] + ann["question_type"] = qaAnn["question_type"] + ann["answer_type"] = qaAnn["answer_type"] + print( + "DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds()) + ) + + res.dataset["annotations"] = anns + res.createIndex() + return res diff --git a/minigpt4/common/vqa_tools/vqa_eval.py b/minigpt4/common/vqa_tools/vqa_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..ee808b349bb6166c744338b02af2bc84a68650ff --- /dev/null +++ b/minigpt4/common/vqa_tools/vqa_eval.py @@ -0,0 +1,324 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +# coding=utf-8 + +__author__ = "aagrawal" + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py). +import sys +import re + + +class VQAEval: + def __init__(self, vqa=None, vqaRes=None, n=2): + self.n = n + self.accuracy = {} + self.evalQA = {} + self.evalQuesType = {} + self.evalAnsType = {} + self.vqa = vqa + self.vqaRes = vqaRes + if vqa is not None: + self.params = {"question_id": vqa.getQuesIds()} + self.contractions = { + "aint": "ain't", + "arent": "aren't", + "cant": "can't", + "couldve": "could've", + "couldnt": "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + "didnt": "didn't", + "doesnt": "doesn't", + "dont": "don't", + "hadnt": "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + "hasnt": "hasn't", + "havent": "haven't", + "hed": "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + "hes": "he's", + "howd": "how'd", + "howll": "how'll", + "hows": "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + "Im": "I'm", + "Ive": "I've", + "isnt": "isn't", + "itd": "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + "itll": "it'll", + "let's": "let's", + "maam": "ma'am", + "mightnt": "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + "mightve": "might've", + "mustnt": "mustn't", + "mustve": "must've", + "neednt": "needn't", + "notve": "not've", + "oclock": "o'clock", + "oughtnt": "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + "shant": "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + "shouldve": "should've", + "shouldnt": "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": "somebodyd", + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + "somebodyll": "somebody'll", + "somebodys": "somebody's", + "someoned": "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + "someonell": "someone'll", + "someones": "someone's", + "somethingd": "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + "somethingll": "something'll", + "thats": "that's", + "thered": "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + "therere": "there're", + "theres": "there's", + "theyd": "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + "theyll": "they'll", 
+ "theyre": "they're", + "theyve": "they've", + "twas": "'twas", + "wasnt": "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + "weve": "we've", + "werent": "weren't", + "whatll": "what'll", + "whatre": "what're", + "whats": "what's", + "whatve": "what've", + "whens": "when's", + "whered": "where'd", + "wheres": "where's", + "whereve": "where've", + "whod": "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + "wholl": "who'll", + "whos": "who's", + "whove": "who've", + "whyll": "why'll", + "whyre": "why're", + "whys": "why's", + "wont": "won't", + "wouldve": "would've", + "wouldnt": "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + "yall": "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + "youd": "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + "youll": "you'll", + "youre": "you're", + "youve": "you've", + } + self.manualMap = { + "none": "0", + "zero": "0", + "one": "1", + "two": "2", + "three": "3", + "four": "4", + "five": "5", + "six": "6", + "seven": "7", + "eight": "8", + "nine": "9", + "ten": "10", + } + self.articles = ["a", "an", "the"] + + self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") + self.commaStrip = re.compile("(\d)(,)(\d)") + self.punct = [ + ";", + r"/", + "[", + "]", + '"', + "{", + "}", + "(", + ")", + "=", + "+", + "\\", + "_", + "-", + ">", + "<", + "@", + "`", + ",", + "?", + "!", + ] + + def evaluate(self, quesIds=None): + if quesIds == None: + quesIds = [quesId for quesId in self.params["question_id"]] + gts = {} + res = {} + for quesId in quesIds: + gts[quesId] = self.vqa.qa[quesId] + res[quesId] = self.vqaRes.qa[quesId] + + # ================================================= + # Compute accuracy + # ================================================= + accQA = [] + accQuesType = {} + accAnsType = {} + print("computing accuracy") + step = 0 + for quesId in quesIds: + resAns = res[quesId]["answer"] + resAns = resAns.replace("\n", " ") + resAns = resAns.replace("\t", " ") + resAns = resAns.strip() + resAns = self.processPunctuation(resAns) + resAns = self.processDigitArticle(resAns) + gtAcc = [] + gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]] + if len(set(gtAnswers)) > 1: + for ansDic in gts[quesId]["answers"]: + ansDic["answer"] = self.processPunctuation(ansDic["answer"]) + for gtAnsDatum in gts[quesId]["answers"]: + otherGTAns = [ + item for item in gts[quesId]["answers"] if item != gtAnsDatum + ] + matchingAns = [item for item in otherGTAns if item["answer"] == resAns] + acc = min(1, float(len(matchingAns)) / 3) + gtAcc.append(acc) + quesType = gts[quesId]["question_type"] + ansType = gts[quesId]["answer_type"] + avgGTAcc = float(sum(gtAcc)) / len(gtAcc) + accQA.append(avgGTAcc) + if quesType not in accQuesType: + accQuesType[quesType] = [] + accQuesType[quesType].append(avgGTAcc) + if ansType not in accAnsType: + accAnsType[ansType] = [] + accAnsType[ansType].append(avgGTAcc) + self.setEvalQA(quesId, avgGTAcc) + self.setEvalQuesType(quesId, quesType, avgGTAcc) + self.setEvalAnsType(quesId, ansType, avgGTAcc) + if step % 100 == 0: + self.updateProgress(step / float(len(quesIds))) + step = step + 1 + + self.setAccuracy(accQA, accQuesType, accAnsType) + print("Done computing accuracy") + + def processPunctuation(self, inText): + outText = inText + for p in self.punct: + if (p + " " in inText or " " + p in inText) or ( + re.search(self.commaStrip, inText) != None + ): + outText = outText.replace(p, 
"") + else: + outText = outText.replace(p, " ") + outText = self.periodStrip.sub("", outText, re.UNICODE) + return outText + + def processDigitArticle(self, inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = self.manualMap.setdefault(word, word) + if word not in self.articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in self.contractions: + outText[wordId] = self.contractions[word] + outText = " ".join(outText) + return outText + + def setAccuracy(self, accQA, accQuesType, accAnsType): + self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n) + self.accuracy["perQuestionType"] = { + quesType: round( + 100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]), + self.n, + ) + for quesType in accQuesType + } + self.accuracy["perAnswerType"] = { + ansType: round( + 100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n + ) + for ansType in accAnsType + } + + def setEvalQA(self, quesId, acc): + self.evalQA[quesId] = round(100 * acc, self.n) + + def setEvalQuesType(self, quesId, quesType, acc): + if quesType not in self.evalQuesType: + self.evalQuesType[quesType] = {} + self.evalQuesType[quesType][quesId] = round(100 * acc, self.n) + + def setEvalAnsType(self, quesId, ansType, acc): + if ansType not in self.evalAnsType: + self.evalAnsType[ansType] = {} + self.evalAnsType[ansType][quesId] = round(100 * acc, self.n) + + def updateProgress(self, progress): + barLength = 20 + status = "" + if isinstance(progress, int): + progress = float(progress) + if not isinstance(progress, float): + progress = 0 + status = "error: progress var must be float\r\n" + if progress < 0: + progress = 0 + status = "Halt...\r\n" + if progress >= 1: + progress = 1 + status = "Done...\r\n" + block = int(round(barLength * progress)) + text = "\rFinshed Percent: [{0}] {1}% {2}".format( + "#" * block + "-" * (barLength - block), int(progress * 100), status + ) + sys.stdout.write(text) + sys.stdout.flush() diff --git a/minigpt4/configs/datasets/aokvqa/defaults.yaml b/minigpt4/configs/datasets/aokvqa/defaults.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec8f429915f83a88d371ca8a888d2e67a350bc1f --- /dev/null +++ b/minigpt4/configs/datasets/aokvqa/defaults.yaml @@ -0,0 +1,20 @@ + # Copyright (c) 2022, salesforce.com, inc. + # All rights reserved. 
+ # SPDX-License-Identifier: BSD-3-Clause + # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause + +datasets: + aok_vqa: + # data_dir: ${env.data_dir}/datasets + data_type: images # [images|videos|features] + + build_info: + # Be careful not to append minus sign (-) before split to avoid itemizing + annotations: + train: + url: + - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/aokvqa/aokvqa_v1p0_train.json + storage: + - /root/autodl-tmp/minigpt/aokvqa/aokvqa_v1p0_train.json + images: + storage: /root/autodl-tmp \ No newline at end of file diff --git a/minigpt4/configs/datasets/cc_sbu/align.yaml b/minigpt4/configs/datasets/cc_sbu/align.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad97d74b460b01ad58e8db200f979afa00706470 --- /dev/null +++ b/minigpt4/configs/datasets/cc_sbu/align.yaml @@ -0,0 +1,5 @@ +datasets: + cc_sbu_align: + data_type: images + build_info: + storage: /root/autodl-tmp/cc_sbu_align diff --git a/minigpt4/configs/datasets/cc_sbu/defaults.yaml b/minigpt4/configs/datasets/cc_sbu/defaults.yaml new file mode 100644 index 0000000000000000000000000000000000000000..575aaf921c88b9b5aeb91e33179740065e910e10 --- /dev/null +++ b/minigpt4/configs/datasets/cc_sbu/defaults.yaml @@ -0,0 +1,5 @@ +datasets: + cc_sbu: + data_type: images + build_info: + storage: /root/autodl-tmp/cc_sbu/cc_sbu_dataset/{00000..01255}.tar diff --git a/minigpt4/configs/datasets/coco/caption.yaml b/minigpt4/configs/datasets/coco/caption.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eda297f866b2bd578002b478161527bbe274baa0 --- /dev/null +++ b/minigpt4/configs/datasets/coco/caption.yaml @@ -0,0 +1,21 @@ + # Copyright (c) 2022, salesforce.com, inc. + # All rights reserved. + # SPDX-License-Identifier: BSD-3-Clause + # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause + +datasets: + coco_caption: # name of the dataset builder + # dataset_card: dataset_card/coco_caption.md + # data_dir: ${env.data_dir}/datasets + data_type: images # [images|videos|features] + + build_info: + # Be careful not to append minus sign (-) before split to avoid itemizing + annotations: + train: + url: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json + md5: aa31ac474cf6250ebb81d18348a07ed8 + storage: /root/autodl-tmp/coco_karpathy_train.json + images: + storage: /root/autodl-tmp + diff --git a/minigpt4/configs/datasets/coco/defaults_vqa.yaml b/minigpt4/configs/datasets/coco/defaults_vqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c72b90068578c596bd862ac6b38eedc42196ba06 --- /dev/null +++ b/minigpt4/configs/datasets/coco/defaults_vqa.yaml @@ -0,0 +1,24 @@ + # Copyright (c) 2022, salesforce.com, inc. + # All rights reserved. 
+ # SPDX-License-Identifier: BSD-3-Clause + # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause + +datasets: + coco_vqa: + # data_dir: ${env.data_dir}/datasets + data_type: images # [images|videos|features] + + build_info: + + annotations: + train: + url: + - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/vqav2/vqa_train.json + - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/vqav2/vqa_val.json + storage: + - /root/autodl-tmp/minigpt/cocovqa/vqa_train.json + - /root/autodl-tmp/minigpt/cocovqa/vqa_val.json + images: + storage: /root/autodl-tmp + + \ No newline at end of file diff --git a/minigpt4/configs/datasets/coco_bbox/invrefcoco.yaml b/minigpt4/configs/datasets/coco_bbox/invrefcoco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d1eb96211490587af0bd4a27c18c7aab520aefc --- /dev/null +++ b/minigpt4/configs/datasets/coco_bbox/invrefcoco.yaml @@ -0,0 +1,8 @@ +datasets: + invrefcoco: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/coco + dataset: invrefcoco + splitBy: unc \ No newline at end of file diff --git a/minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml b/minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed87d41be2b05c5a35112c5adc9dea0cb2f2f015 --- /dev/null +++ b/minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml @@ -0,0 +1,8 @@ +datasets: + invrefcocog: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/coco + dataset: invrefcocog + splitBy: umd \ No newline at end of file diff --git a/minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml b/minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21aba9fb560cd573943d3d5d2454853711759260 --- /dev/null +++ b/minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml @@ -0,0 +1,8 @@ +datasets: + invrefcocop: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/coco + dataset: invrefcoco+ + splitBy: unc \ No newline at end of file diff --git a/minigpt4/configs/datasets/coco_bbox/refcoco.yaml b/minigpt4/configs/datasets/coco_bbox/refcoco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e2fd760cf3fca356bdfe39342613a94ae2bfd6b --- /dev/null +++ b/minigpt4/configs/datasets/coco_bbox/refcoco.yaml @@ -0,0 +1,8 @@ +datasets: + refcoco: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/coco + dataset: refcoco + splitBy: unc \ No newline at end of file diff --git a/minigpt4/configs/datasets/coco_bbox/refcocog.yaml b/minigpt4/configs/datasets/coco_bbox/refcocog.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89528efb776cc8a78c9fb0410dc18c5ad413e08c --- /dev/null +++ b/minigpt4/configs/datasets/coco_bbox/refcocog.yaml @@ -0,0 +1,8 @@ +datasets: + refcocog: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/coco + dataset: refcocog + splitBy: umd \ No newline at end of file diff --git a/minigpt4/configs/datasets/coco_bbox/refcocop.yaml b/minigpt4/configs/datasets/coco_bbox/refcocop.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3283497a21305086c1f026b31ad47c667a2bea7f --- /dev/null +++ 
b/minigpt4/configs/datasets/coco_bbox/refcocop.yaml @@ -0,0 +1,8 @@ +datasets: + refcocop: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/coco + dataset: refcoco+ + splitBy: unc \ No newline at end of file diff --git a/minigpt4/configs/datasets/flickr/caption_to_phrase.yaml b/minigpt4/configs/datasets/flickr/caption_to_phrase.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d38c34db40b18af74f63b5612c1dde40dd233c3a --- /dev/null +++ b/minigpt4/configs/datasets/flickr/caption_to_phrase.yaml @@ -0,0 +1,6 @@ +datasets: + flickr_CaptionToPhrase: + data_type: images + build_info: + image_path: /root/autodl-tmp/filtered_flickr/filtered_flickr/images + ann_path: /root/autodl-tmp/filtered_flickr/filtered_flickr/captiontobbox.json diff --git a/minigpt4/configs/datasets/flickr/default.yaml b/minigpt4/configs/datasets/flickr/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ab01fddce3e6482ab9846923d29bc0111640d76 --- /dev/null +++ b/minigpt4/configs/datasets/flickr/default.yaml @@ -0,0 +1,6 @@ +datasets: + flickr_grounded_caption: + data_type: images + build_info: + image_path: /root/autodl-tmp/filtered_flickr/filtered_flickr/images + ann_path: /root/autodl-tmp/filtered_flickr/filtered_flickr/groundedcaption.json diff --git a/minigpt4/configs/datasets/flickr/object_to_phrase.yaml b/minigpt4/configs/datasets/flickr/object_to_phrase.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1350a358b9758a4657cc8d9ec309a840b71a5ed0 --- /dev/null +++ b/minigpt4/configs/datasets/flickr/object_to_phrase.yaml @@ -0,0 +1,6 @@ +datasets: + flickr_ObjectToPhrase: + data_type: images + build_info: + image_path: /root/autodl-tmp/filtered_flickr/filtered_flickr/images + ann_path: /root/autodl-tmp/filtered_flickr/filtered_flickr/phrasetobbox.json diff --git a/minigpt4/configs/datasets/gqa/balanced_val.yaml b/minigpt4/configs/datasets/gqa/balanced_val.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c71c70c4a6fbb8872fa03324e28689cb6391aba0 --- /dev/null +++ b/minigpt4/configs/datasets/gqa/balanced_val.yaml @@ -0,0 +1,21 @@ + # Copyright (c) 2022, salesforce.com, inc. + # All rights reserved. 
+ # SPDX-License-Identifier: BSD-3-Clause + # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause + +datasets: + gqa: + # data_dir: ${env.data_dir}/datasets + data_type: images # [images|videos|features] + + build_info: + # Be careful not to append minus sign (-) before split to avoid itemizing + annotations: + train: + url: + - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/train_balanced_questions.json + storage: + - /root/autodl-tmp/minigpt/gqa/train_balanced_questions.json + + images: + storage: /root/autodl-tmp/minigpt/gqa diff --git a/minigpt4/configs/datasets/laion/defaults.yaml b/minigpt4/configs/datasets/laion/defaults.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e5e393977a69f0deff226f382cfa17ea6254ae3 --- /dev/null +++ b/minigpt4/configs/datasets/laion/defaults.yaml @@ -0,0 +1,5 @@ +datasets: + laion: + data_type: images + build_info: + storage: /root/autodl-tmp/laion_dataset_part2/{00000..05244}.tar diff --git a/minigpt4/configs/datasets/llava/conversation.yaml b/minigpt4/configs/datasets/llava/conversation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4666816523fce699efe6fd64635f811b678f78d --- /dev/null +++ b/minigpt4/configs/datasets/llava/conversation.yaml @@ -0,0 +1,7 @@ +datasets: + + llava_conversation: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/llava/conversation_58k.json \ No newline at end of file diff --git a/minigpt4/configs/datasets/llava/detail.yaml b/minigpt4/configs/datasets/llava/detail.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52cfa3123175aee79359c3cae252f37666b754b9 --- /dev/null +++ b/minigpt4/configs/datasets/llava/detail.yaml @@ -0,0 +1,6 @@ +datasets: + llava_detail: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/llava/detail_23k.json \ No newline at end of file diff --git a/minigpt4/configs/datasets/llava/reason.yaml b/minigpt4/configs/datasets/llava/reason.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9adfa2679adb5e711a6844ab6b2d0868462e41d4 --- /dev/null +++ b/minigpt4/configs/datasets/llava/reason.yaml @@ -0,0 +1,7 @@ +datasets: + + llava_reason: + data_type: images + build_info: + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/llava/complex_reasoning_77k.json \ No newline at end of file diff --git a/minigpt4/configs/datasets/multitask_conversation/default.yaml b/minigpt4/configs/datasets/multitask_conversation/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c51bdc764eb75f4bfe771ac3fc9d577fe7b4db3d --- /dev/null +++ b/minigpt4/configs/datasets/multitask_conversation/default.yaml @@ -0,0 +1,7 @@ +datasets: + multitask_conversation: + data_type: images + build_info: + + image_path: /root/autodl-tmp/train + ann_path: /root/autodl-tmp/minigpt/multi-task_conversation/multitask_conversation.json \ No newline at end of file diff --git a/minigpt4/configs/datasets/nlp/unnatural_instruction.yaml b/minigpt4/configs/datasets/nlp/unnatural_instruction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..742f0dc33e4d2cc8062b1619e5332b870434ed8d --- /dev/null +++ b/minigpt4/configs/datasets/nlp/unnatural_instruction.yaml @@ -0,0 +1,5 @@ +datasets: + unnatural_instruction: + data_type: text + build_info: + ann_path: 
/root/autodl-tmp/minigpt/unnatural_instructions/filtered_unnatural_instruction.json \ No newline at end of file diff --git a/minigpt4/configs/datasets/ocrvqa/ocrvqa.yaml b/minigpt4/configs/datasets/ocrvqa/ocrvqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d68860caaf52a773aad1d17e0d78b35b7a677686 --- /dev/null +++ b/minigpt4/configs/datasets/ocrvqa/ocrvqa.yaml @@ -0,0 +1,6 @@ +datasets: + ocrvqa: + data_type: images + build_info: + image_path: /root/autodl-tmp/minigpt/ocrvqa/images + ann_path: /root/autodl-tmp/minigpt/ocrvqa/dataset.json \ No newline at end of file diff --git a/minigpt4/configs/datasets/okvqa/defaults.yaml b/minigpt4/configs/datasets/okvqa/defaults.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ce6258ca8ccdb35377c91ffa37ec5e19782f7778 --- /dev/null +++ b/minigpt4/configs/datasets/okvqa/defaults.yaml @@ -0,0 +1,21 @@ + # Copyright (c) 2022, salesforce.com, inc. + # All rights reserved. + # SPDX-License-Identifier: BSD-3-Clause + # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause + +datasets: + ok_vqa: + # data_dir: ${env.data_dir}/datasets + data_type: images # [images|videos|features] + + build_info: + # Be careful not to append minus sign (-) before split to avoid itemizing + annotations: + train: + url: + # TODO make this order insensitive + - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json + storage: + - /root/autodl-tmp/minigpt/okvqa/okvqa_train.json + images: + storage: /root/autodl-tmp \ No newline at end of file diff --git a/minigpt4/configs/datasets/textcaps/caption.yaml b/minigpt4/configs/datasets/textcaps/caption.yaml new file mode 100644 index 0000000000000000000000000000000000000000..35bd7af4d8b78d918d44e57da7ad57737a7afaf7 --- /dev/null +++ b/minigpt4/configs/datasets/textcaps/caption.yaml @@ -0,0 +1,9 @@ +datasets: + textcaps_caption: + data_type: images + + build_info: + image_path: /root/autodl-tmp/minigpt/train_images + ann_path: /root/autodl-tmp/minigpt/TextCaps_0.1_train.json + + diff --git a/minigpt4/configs/datasets/vg/ref.yaml b/minigpt4/configs/datasets/vg/ref.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5a37fac537ab242a1d1e8f185c92a1ad4bdbe16 --- /dev/null +++ b/minigpt4/configs/datasets/vg/ref.yaml @@ -0,0 +1,5 @@ +datasets: + refvg: + data_type: images + build_info: + data_dir: /root/autodl-tmp/minigpt \ No newline at end of file diff --git a/minigpt4/configs/default.yaml b/minigpt4/configs/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ff5a6a23fa2e3914938631b96c71fdf723dbbc10 --- /dev/null +++ b/minigpt4/configs/default.yaml @@ -0,0 +1,5 @@ +env: + # For default users + # cache_root: "cache" + # For internal use with persistent storage + cache_root: "/export/home/.cache/minigpt4" diff --git a/minigpt4/configs/models/minigpt4_vicuna0.yaml b/minigpt4/configs/models/minigpt4_vicuna0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46034bc8ef5de78592fd2bc95b2fe07fee9e053d --- /dev/null +++ b/minigpt4/configs/models/minigpt4_vicuna0.yaml @@ -0,0 +1,32 @@ +model: + arch: minigpt4 + + # vit encoder + image_size: 224 + drop_path_rate: 0 + use_grad_checkpoint: False + vit_precision: "fp32" + freeze_vit: True + freeze_qformer: True + + # Q-Former + num_query_token: 32 + + # generation configs + prompt: "" + + llama_model: "phi-2" + +preprocess: + vis_processor: + train: + name: "blip2_image_train" + 
image_size: 224 + eval: + name: "blip2_image_eval" + image_size: 224 + text_processor: + train: + name: "blip_caption" + eval: + name: "blip_caption" diff --git a/minigpt4/configs/models/minigpt_v2.yaml b/minigpt4/configs/models/minigpt_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf9978b16d245982ad27dda6e535521f74c67d51 --- /dev/null +++ b/minigpt4/configs/models/minigpt_v2.yaml @@ -0,0 +1,31 @@ +model: + arch: minigpt_v2 + + # vit encoder + image_size: 448 + drop_path_rate: 0 + use_grad_checkpoint: False + vit_precision: "fp16" + freeze_vit: True + + # generation configs + prompt: "" + + llama_model: "phi-2" + lora_r: 64 + lora_alpha: 16 + + +preprocess: + vis_processor: + train: + name: "blip2_image_train" + image_size: 448 + eval: + name: "blip2_image_eval" + image_size: 448 + text_processor: + train: + name: "blip_caption" + eval: + name: "blip_caption" diff --git a/minigpt4/conversation/__init__.py b/minigpt4/conversation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt4/conversation/__pycache__/__init__.cpython-39.pyc b/minigpt4/conversation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aeb034480cbd102d07fe590a3894303f80d74b6 Binary files /dev/null and b/minigpt4/conversation/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/conversation/__pycache__/conversation.cpython-39.pyc b/minigpt4/conversation/__pycache__/conversation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3283a75e925591cd54e06418600dbc71622db8d Binary files /dev/null and b/minigpt4/conversation/__pycache__/conversation.cpython-39.pyc differ diff --git a/minigpt4/conversation/conversation.py b/minigpt4/conversation/conversation.py new file mode 100644 index 0000000000000000000000000000000000000000..213079facd4c7f7813c6bffc619f0927068b35f7 --- /dev/null +++ b/minigpt4/conversation/conversation.py @@ -0,0 +1,247 @@ +import argparse +import time +from threading import Thread +from PIL import Image + +import torch +from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer +from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer + +import dataclasses +from enum import auto, Enum +from typing import List, Tuple, Any + +from minigpt4.common.registry import registry + +tokenizer = AutoTokenizer.from_pretrained('phi-2') + +class SeparatorStyle(Enum): + """Different separator style.""" + SINGLE = auto() + TWO = auto() + + +@dataclasses.dataclass +class Conversation: + """A class that keeps all conversation history.""" + system: str + roles: List[str] + messages: List[List[str]] + offset: int + # system_img: List[Image.Image] = [] + sep_style: SeparatorStyle = SeparatorStyle.SINGLE + sep: str = "###" + sep2: str = None + + skip_next: bool = False + conv_id: Any = None + + def get_prompt(self): + if self.sep_style == SeparatorStyle.SINGLE: + ret = self.system + self.sep + for role, message in self.messages: + if message: + ret += role + message + self.sep + else: + ret += role + return ret + elif self.sep_style == SeparatorStyle.TWO: + seps = [self.sep, self.sep2] + ret = self.system + seps[0] + for i, (role, message) in enumerate(self.messages): + if message: + ret += role + message + seps[i % 2] + else: + ret += role + return ret + else: + raise ValueError(f"Invalid style: {self.sep_style}") + + def append_message(self, role, message): + 
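+ """Append one turn to the history; `message` may be None as a placeholder that answer() fills in later."""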
+ self.messages.append([role, message])
+
+ def to_gradio_chatbot(self):
+ ret = []
+ for i, (role, msg) in enumerate(self.messages[self.offset:]):
+ if i % 2 == 0:
+ ret.append([msg, None])
+ else:
+ ret[-1][-1] = msg
+ return ret
+
+ def copy(self):
+ return Conversation(
+ system=self.system,
+ # system_img=self.system_img,
+ roles=self.roles,
+ messages=[[x, y] for x, y in self.messages],
+ offset=self.offset,
+ sep_style=self.sep_style,
+ sep=self.sep,
+ sep2=self.sep2,
+ conv_id=self.conv_id)
+
+ def dict(self):
+ return {
+ "system": self.system,
+ # "system_img": self.system_img,
+ "roles": self.roles,
+ "messages": self.messages,
+ "offset": self.offset,
+ "sep": self.sep,
+ "sep2": self.sep2,
+ "conv_id": self.conv_id,
+ }
+
+
+class StoppingCriteriaSub(StoppingCriteria):
+
+ def __init__(self, stops=[], encounters=1):
+ super().__init__()
+ self.stops = stops
+
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
+ for stop in self.stops:
+ if torch.all(input_ids[:, -len(stop):] == stop).item():
+ return True
+
+ return False
+
+
+CONV_VISION = Conversation(
+ system="Give the following image: <Img>ImageContent</Img>. "
+ "You will be able to see the image once I provide it to you. Please answer my questions.",
+ roles=("Human: ", "Assistant: "),
+ messages=[],
+ offset=2,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+)
+
+CONV_VISION_LLama2 = Conversation(
+ system="Give the following image: <Img>ImageContent</Img>. "
+ "You will be able to see the image once I provide it to you. Please answer my questions.",
+ roles=("Human: ", "Assistant: "),
+ messages=[],
+ offset=2,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+)
+
+CONV_VISION_minigptv2 = Conversation(
+ system="",
+ roles=("Human: ", "Assistant: "),
+ messages=[],
+ offset=2,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+)
+
+class Chat:
+ def __init__(self, model, vis_processor, device='cuda:0', stopping_criteria=None):
+ self.device = device
+ self.model = model
+ self.vis_processor = vis_processor
+
+ if stopping_criteria is not None:
+ self.stopping_criteria = stopping_criteria
+ else:
+ stop_words_ids = [torch.tensor([2]).to(self.device)]
+ self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
+
+ def ask(self, text, conv):
+ if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[0] \
+ and conv.messages[-1][1][-6:] == '</Img>': # last message is an image.
+ conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text])
+ else:
+ conv.append_message(conv.roles[0], text)
+
+ def answer_prepare(self, conv, img_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,
+ repetition_penalty=1.05, length_penalty=1, temperature=1.0, max_length=2000):
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+ embs = self.model.get_context_emb(prompt, img_list)
+
+ current_max_len = embs.shape[1] + max_new_tokens
+ if current_max_len - max_length > 0:
+ print('Warning: The number of tokens in the current conversation exceeds the max length. '
+ 'The model will not see the contexts outside the range.')
+ begin_idx = max(0, current_max_len - max_length)
+ embs = embs[:, begin_idx:]
+
+ generation_kwargs = dict(
+ inputs_embeds=embs,
+ max_new_tokens=max_new_tokens,
+ stopping_criteria=self.stopping_criteria,
+ num_beams=num_beams,
+ do_sample=True,
+ min_length=min_length,
+ top_p=top_p,
+ repetition_penalty=repetition_penalty,
+ length_penalty=length_penalty,
+ temperature=float(temperature),
+ pad_token_id=tokenizer.pad_token_id,
+ bos_token_id=tokenizer.bos_token_id,
+ eos_token_id=tokenizer.eos_token_id,
+ )
+ return generation_kwargs
+
+ def answer(self, conv, img_list, **kwargs):
+ generation_dict = self.answer_prepare(conv, img_list, **kwargs)
+ output_token = self.model_generate(**generation_dict)[0]
+ output_text = self.model.llama_tokenizer.decode(output_token, skip_special_tokens=True)
+ output_text = output_text.split('###')[0] # remove the stop sign '###'
+ output_text = output_text.split('Assistant:')[-1].strip()
+ conv.messages[-1][1] = output_text
+ return output_text, output_token.cpu().numpy()
+
+ def stream_answer(self, conv, img_list, **kwargs):
+ generation_kwargs = self.answer_prepare(conv, img_list, **kwargs)
+ streamer = TextIteratorStreamer(self.model.llama_tokenizer, skip_special_tokens=True)
+ generation_kwargs['streamer'] = streamer
+ thread = Thread(target=self.model_generate, kwargs=generation_kwargs)
+ thread.start()
+ return streamer
+
+ def model_generate(self, *args, **kwargs):
+ # for 8 bit and 16 bit compatibility
+ with self.model.maybe_autocast():
+ output = self.model.llama_model.generate(*args, **kwargs)
+ return output
+
+ # def model_generate(self, *args, **kwargs):
+ # # for 8 bit and 16 bit compatibility
+
+ # with self.model.maybe_autocast():
+ # max_length=100
+ # for _ in range(max_length):
+ # output = self.model(**kwargs).logits
+ # next_word_id = output[:, -1, :].argmax(1)
+ # generated = torch.cat((generated, next_word_id.unsqueeze(-1)), dim=1)
+ # return output
+
+ def upload_img(self, image, conv, img_list):
+ if isinstance(image, str): # is an image path
+ raw_image = Image.open(image).convert('RGB')
+ image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
+ elif isinstance(image, Image.Image):
+ raw_image = image
+ image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
+ elif isinstance(image, torch.Tensor):
+ if len(image.shape) == 3:
+ image = image.unsqueeze(0)
+ image = image.to(self.device)
+
+ image_emb, _ = self.model.encode_img(image)
+ img_list.append(image_emb)
+ conv.append_message(conv.roles[0], "<Img><ImageHere></Img>")
+ msg = "Received."
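+ # The "<Img><ImageHere></Img>" placeholder appended above marks where
+ # get_context_emb() splices the image embeddings from img_list into the prompt.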
+ # self.conv.append_message(self.conv.roles[1], msg) + return msg + diff --git a/minigpt4/datasets/__init__.py b/minigpt4/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt4/datasets/__pycache__/__init__.cpython-39.pyc b/minigpt4/datasets/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42594fe21d414042d76e8f011962e43f82b701e0 Binary files /dev/null and b/minigpt4/datasets/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/datasets/__pycache__/data_utils.cpython-39.pyc b/minigpt4/datasets/__pycache__/data_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5febf2412acf53053fb4ee126b509fd75e06ae1 Binary files /dev/null and b/minigpt4/datasets/__pycache__/data_utils.cpython-39.pyc differ diff --git a/minigpt4/datasets/builders/__init__.py b/minigpt4/datasets/builders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6d0964063f145c6b119c78460aed69bcc4dfa4c1 --- /dev/null +++ b/minigpt4/datasets/builders/__init__.py @@ -0,0 +1,72 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from minigpt4.datasets.builders.base_dataset_builder import load_dataset_config +from minigpt4.datasets.builders.image_text_pair_builder import ( + CCSBUBuilder, + LaionBuilder, + CCSBUAlignBuilder +) +from minigpt4.common.registry import registry + +__all__ = [ + "CCSBUBuilder", + "LaionBuilder", + "CCSBUAlignBuilder" +] + + +def load_dataset(name, cfg_path=None, vis_path=None, data_type=None): + """ + Example + + >>> dataset = load_dataset("coco_caption", cfg=None) + >>> splits = dataset.keys() + >>> print([len(dataset[split]) for split in splits]) + + """ + if cfg_path is None: + cfg = None + else: + cfg = load_dataset_config(cfg_path) + + try: + builder = registry.get_builder_class(name)(cfg) + except TypeError: + print( + f"Dataset {name} not found. Available datasets:\n" + + ", ".join([str(k) for k in dataset_zoo.get_names()]) + ) + exit(1) + + if vis_path is not None: + if data_type is None: + # use default data type in the config + data_type = builder.config.data_type + + assert ( + data_type in builder.config.build_info + ), f"Invalid data_type {data_type} for {name}." 
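+ # Override the configured storage location for this data type with the
+ # caller-supplied visual root.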
+ + builder.config.build_info.get(data_type).storage = vis_path + + dataset = builder.build_datasets() + return dataset + + +class DatasetZoo: + def __init__(self) -> None: + self.dataset_zoo = { + k: list(v.DATASET_CONFIG_DICT.keys()) + for k, v in sorted(registry.mapping["builder_name_mapping"].items()) + } + + def get_names(self): + return list(self.dataset_zoo.keys()) + + +dataset_zoo = DatasetZoo() diff --git a/minigpt4/datasets/builders/__pycache__/__init__.cpython-39.pyc b/minigpt4/datasets/builders/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f21286ef6c0067cefcf1ea1084b2824318a6632 Binary files /dev/null and b/minigpt4/datasets/builders/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/datasets/builders/__pycache__/base_dataset_builder.cpython-39.pyc b/minigpt4/datasets/builders/__pycache__/base_dataset_builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5efce05847e93f131ed5f43ba6db3fefdca3450 Binary files /dev/null and b/minigpt4/datasets/builders/__pycache__/base_dataset_builder.cpython-39.pyc differ diff --git a/minigpt4/datasets/builders/__pycache__/image_text_pair_builder.cpython-39.pyc b/minigpt4/datasets/builders/__pycache__/image_text_pair_builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23130159247bdf01c422b9222f428a1b2713e3c9 Binary files /dev/null and b/minigpt4/datasets/builders/__pycache__/image_text_pair_builder.cpython-39.pyc differ diff --git a/minigpt4/datasets/builders/base_dataset_builder.py b/minigpt4/datasets/builders/base_dataset_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..4b607e3c0a8abaa6b1ccbc711e27ff3755f5ec11 --- /dev/null +++ b/minigpt4/datasets/builders/base_dataset_builder.py @@ -0,0 +1,236 @@ +""" + This file is from + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import logging +import os +import shutil +import warnings + +from omegaconf import OmegaConf +import torch.distributed as dist +from torchvision.datasets.utils import download_url + +import minigpt4.common.utils as utils +from minigpt4.common.dist_utils import is_dist_avail_and_initialized, is_main_process +from minigpt4.common.registry import registry +from minigpt4.processors.base_processor import BaseProcessor + + + +class BaseDatasetBuilder: + train_dataset_cls, eval_dataset_cls = None, None + + def __init__(self, cfg=None): + super().__init__() + + if cfg is None: + # help to create datasets from default config. + self.config = load_dataset_config(self.default_config_path()) + elif isinstance(cfg, str): + self.config = load_dataset_config(cfg) + else: + # when called from task.build_dataset() + self.config = cfg + + self.data_type = self.config.data_type + + self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} + self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} + + def build_datasets(self): + # download, split, etc... + # only called on 1 GPU/TPU in distributed + + if is_main_process(): + self._download_data() + + if is_dist_avail_and_initialized(): + dist.barrier() + + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
+ logging.info("Building datasets...") + datasets = self.build() # dataset['train'/'val'/'test'] + + return datasets + + def build_processors(self): + vis_proc_cfg = self.config.get("vis_processor") + txt_proc_cfg = self.config.get("text_processor") + + if vis_proc_cfg is not None: + vis_train_cfg = vis_proc_cfg.get("train") + vis_eval_cfg = vis_proc_cfg.get("eval") + + self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg) + self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg) + + if txt_proc_cfg is not None: + txt_train_cfg = txt_proc_cfg.get("train") + txt_eval_cfg = txt_proc_cfg.get("eval") + + self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg) + self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg) + + @staticmethod + def _build_proc_from_cfg(cfg): + return ( + registry.get_processor_class(cfg.name).from_config(cfg) + if cfg is not None + else None + ) + + @classmethod + def default_config_path(cls, type="default"): + return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type]) + + def _download_data(self): + self._download_ann() + self._download_vis() + + def _download_ann(self): + """ + Download annotation files if necessary. + All the vision-language datasets should have annotations of unified format. + + storage_path can be: + (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative. + (2) basename/dirname: will be suffixed with base name of URL if dirname is provided. + + Local annotation paths should be relative. + """ + anns = self.config.build_info.annotations + + splits = anns.keys() + + cache_root = registry.get_path("cache_root") + + for split in splits: + info = anns[split] + + urls, storage_paths = info.get("url", None), info.storage + + if isinstance(urls, str): + urls = [urls] + if isinstance(storage_paths, str): + storage_paths = [storage_paths] + + assert len(urls) == len(storage_paths) + + for url_or_filename, storage_path in zip(urls, storage_paths): + # if storage_path is relative, make it full by prefixing with cache_root. + if not os.path.isabs(storage_path): + storage_path = os.path.join(cache_root, storage_path) + + dirname = os.path.dirname(storage_path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + if os.path.isfile(url_or_filename): + src, dst = url_or_filename, storage_path + if not os.path.exists(dst): + shutil.copyfile(src=src, dst=dst) + else: + logging.info("Using existing file {}.".format(dst)) + else: + if os.path.isdir(storage_path): + # if only dirname is provided, suffix with basename of URL. + raise ValueError( + "Expecting storage_path to be a file path, got directory {}".format( + storage_path + ) + ) + else: + filename = os.path.basename(storage_path) + + download_url(url=url_or_filename, root=dirname, filename=filename) + + def _download_vis(self): + + storage_path = self.config.build_info.get(self.data_type).storage + storage_path = utils.get_cache_path(storage_path) + + if not os.path.exists(storage_path): + warnings.warn( + f""" + The specified path {storage_path} for visual inputs does not exist. + Please provide a correct path to the visual inputs or + refer to datasets/download_scripts/README.md for downloading instructions. + """ + ) + + def build(self): + """ + Create by split datasets inheriting torch.utils.data.Datasets. + + # build() can be dataset-specific. Overwrite to customize. 
+ """ + self.build_processors() + + build_info = self.config.build_info + + ann_info = build_info.annotations + vis_info = build_info.get(self.data_type) + + datasets = dict() + for split in ann_info.keys(): + if split not in ["train", "val", "test"]: + continue + + is_train = split == "train" + + # processors + vis_processor = ( + self.vis_processors["train"] + if is_train + else self.vis_processors["eval"] + ) + text_processor = ( + self.text_processors["train"] + if is_train + else self.text_processors["eval"] + ) + + # annotation path + ann_paths = ann_info.get(split).storage + if isinstance(ann_paths, str): + ann_paths = [ann_paths] + + abs_ann_paths = [] + for ann_path in ann_paths: + if not os.path.isabs(ann_path): + ann_path = utils.get_cache_path(ann_path) + abs_ann_paths.append(ann_path) + ann_paths = abs_ann_paths + + # visual data storage path + vis_path = os.path.join(vis_info.storage, split) + + if not os.path.isabs(vis_path): + # vis_path = os.path.join(utils.get_cache_path(), vis_path) + vis_path = utils.get_cache_path(vis_path) + + if not os.path.exists(vis_path): + warnings.warn("storage path {} does not exist.".format(vis_path)) + + # create datasets + dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls + datasets[split] = dataset_cls( + vis_processor=vis_processor, + text_processor=text_processor, + ann_paths=ann_paths, + vis_root=vis_path, + ) + + return datasets + + +def load_dataset_config(cfg_path): + cfg = OmegaConf.load(cfg_path).datasets + cfg = cfg[list(cfg.keys())[0]] + + return cfg diff --git a/minigpt4/datasets/builders/image_text_pair_builder.py b/minigpt4/datasets/builders/image_text_pair_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..fb344f11a7aa2add17ab3161183adfac90ec09df --- /dev/null +++ b/minigpt4/datasets/builders/image_text_pair_builder.py @@ -0,0 +1,535 @@ +import os +import logging +import warnings + +from minigpt4.common.registry import registry +from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder +from minigpt4.datasets.datasets.laion_dataset import LaionDataset +from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset +from minigpt4.datasets.datasets.text_caps import TextCapDataset +from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset +from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset +from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset +from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset +from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset +from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset +from minigpt4.datasets.datasets.gqa_datasets import GQADataset +from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset +from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset +from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset +from minigpt4.datasets.datasets.coco_caption import COCOCapDataset + + +@registry.register_builder("multitask_conversation") +class MultitaskConversationBuilder(BaseDatasetBuilder): + train_dataset_cls = MultiTaskConversationDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/multitask_conversation/default.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be 
all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + + +@registry.register_builder("unnatural_instruction") +class UnnaturalInstructionBuilder(BaseDatasetBuilder): + train_dataset_cls = UnnaturalDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/nlp/unnatural_instruction.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + ) + + return datasets + + + +@registry.register_builder("llava_detail") +class LlavaDetailBuilder(BaseDatasetBuilder): + train_dataset_cls = LlavaDetailDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/llava/detail.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + + + +@registry.register_builder("llava_reason") +class LlavaReasonBuilder(BaseDatasetBuilder): + train_dataset_cls = LlavaReasonDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/llava/reason.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + +@registry.register_builder("llava_conversation") +class LlavaReasonBuilder(BaseDatasetBuilder): + train_dataset_cls = LlavaConversationDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/llava/conversation.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
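+ # Annotation and image roots come from configs/datasets/llava/conversation.yaml
+ # (conversation_58k.json under the configured image_path).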
+ logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + + +class AllRefCOCOBuilder(BaseDatasetBuilder): + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + + build_info = self.config.build_info + image_path = build_info.image_path + ann_path = build_info.ann_path + + datasets = dict() + + if not os.path.exists(image_path): + warnings.warn("image path {} does not exist.".format(image_path)) + if not os.path.exists(ann_path): + warnings.warn("ann path {} does not exist.".format(ann_path)) + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=ann_path, + vis_root=image_path, + dataset=build_info.dataset, + splitBy=build_info.splitBy + ) + + return datasets + + +@registry.register_builder("refcoco") +class RefCOCOBuilder(AllRefCOCOBuilder): + train_dataset_cls = ReferCOCODataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco_bbox/refcoco.yaml", + } + +@registry.register_builder("refcocop") +class RefCOCOPBuilder(AllRefCOCOBuilder): + train_dataset_cls = ReferCOCODataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco_bbox/refcocop.yaml", + } + + +@registry.register_builder("refcocog") +class RefCOCOGBuilder(AllRefCOCOBuilder): + train_dataset_cls = ReferCOCODataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco_bbox/refcocog.yaml", + } + +@registry.register_builder("invrefcoco") +class RefCOCOBuilder(AllRefCOCOBuilder): + train_dataset_cls = InvReferCOCODataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco_bbox/invrefcoco.yaml", + } + + +@registry.register_builder("invrefcocop") +class RefCOCOPBuilder(AllRefCOCOBuilder): + train_dataset_cls = InvReferCOCODataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco_bbox/invrefcocop.yaml", + } + + +@registry.register_builder("invrefcocog") +class RefCOCOGBuilder(AllRefCOCOBuilder): + train_dataset_cls = InvReferCOCODataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco_bbox/invrefcocog.yaml", + } + +@registry.register_builder("refvg") +class RefVisualGenomeBuilder(BaseDatasetBuilder): + train_dataset_cls = ReferVisualGenomeDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/vg/ref.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
+ logging.info("Building datasets...") + self.build_processors() + + build_info = self.config.build_info + data_dir = build_info.data_dir + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + data_dir=data_dir, + ) + + return datasets + + +@registry.register_builder("textcaps_caption") +class TextcapCaptionBuilder(BaseDatasetBuilder): + train_dataset_cls = TextCapDataset + + DATASET_CONFIG_DICT = {"default": "configs/datasets/textcaps/caption.yaml"} + + def _download_ann(self): + pass + + def _download_vis(self): + pass + + def build(self): + self.build_processors() + + build_info = self.config.build_info + + datasets = dict() + split = "train" + + # create datasets + # [NOTE] return inner_datasets (wds.DataPipeline) + dataset_cls = self.train_dataset_cls + datasets[split] = dataset_cls( + vis_processor=self.vis_processors[split], + text_processor=self.text_processors[split], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + +@registry.register_builder("coco_vqa") +class COCOVQABuilder(BaseDatasetBuilder): + train_dataset_cls = COCOVQADataset + + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco/defaults_vqa.yaml", + } + +@registry.register_builder("ok_vqa") +class OKVQABuilder(COCOVQABuilder): + DATASET_CONFIG_DICT = { + "default": "configs/datasets/okvqa/defaults.yaml", + } + + +@registry.register_builder("aok_vqa") +class AOKVQABuilder(BaseDatasetBuilder): + train_dataset_cls = AOKVQADataset + + DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} + + +@registry.register_builder("gqa") +class GQABuilder(BaseDatasetBuilder): + train_dataset_cls = GQADataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/gqa/balanced_val.yaml", + } + + + + +@registry.register_builder("flickr_grounded_caption") +class GroundedCaptionBuilder(BaseDatasetBuilder): + train_dataset_cls = GroundedDetailDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/flickr/default.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + + +@registry.register_builder("flickr_CaptionToPhrase") +class CaptionToPhraseBuilder(BaseDatasetBuilder): + train_dataset_cls = CaptionToObjectDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/flickr/caption_to_phrase.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
+ logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + +@registry.register_builder("flickr_ObjectToPhrase") +class CaptionToPhraseBuilder(BaseDatasetBuilder): + train_dataset_cls = PhraseToObjectDataset + DATASET_CONFIG_DICT = { + "default": "configs/datasets/flickr/object_to_phrase.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + build_info = self.config.build_info + datasets = dict() + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_path=build_info.ann_path, + vis_root=build_info.image_path, + ) + + return datasets + + + + +class DocumentVQABuilder(BaseDatasetBuilder): + def _download_ann(self): + pass + + def _download_vis(self): + pass + + def build(self): + self.build_processors() + build_info = self.config.build_info + + datasets = dict() + split = "train" + + dataset_cls = self.train_dataset_cls + datasets[split] = dataset_cls( + vis_processor=self.vis_processors[split], + text_processor=self.text_processors[split], + vis_root=build_info.image_path, + ann_path=build_info.ann_path + ) + + return datasets + + +@registry.register_builder("ocrvqa") +class OCRVQABuilder(DocumentVQABuilder): + train_dataset_cls = OCRVQADataset + DATASET_CONFIG_DICT = {"default": "configs/datasets/ocrvqa/ocrvqa.yaml"} + + +@registry.register_builder("cc_sbu") +class CCSBUBuilder(BaseDatasetBuilder): + train_dataset_cls = CCSBUDataset + + DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"} + + def _download_ann(self): + pass + + def _download_vis(self): + pass + + def build(self): + self.build_processors() + + build_info = self.config.build_info + + datasets = dict() + split = "train" + + # create datasets + # [NOTE] return inner_datasets (wds.DataPipeline) + dataset_cls = self.train_dataset_cls + datasets[split] = dataset_cls( + vis_processor=self.vis_processors[split], + text_processor=self.text_processors[split], + location=build_info.storage, + ).inner_dataset + + return datasets + + +@registry.register_builder("laion") +class LaionBuilder(BaseDatasetBuilder): + train_dataset_cls = LaionDataset + + DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"} + + def _download_ann(self): + pass + + def _download_vis(self): + pass + + def build(self): + self.build_processors() + + build_info = self.config.build_info + + datasets = dict() + split = "train" + + # create datasets + # [NOTE] return inner_datasets (wds.DataPipeline) + dataset_cls = self.train_dataset_cls + datasets[split] = dataset_cls( + vis_processor=self.vis_processors[split], + text_processor=self.text_processors[split], + location=build_info.storage, + ).inner_dataset + + return datasets + + + +@registry.register_builder("coco_caption") +class COCOCapBuilder(BaseDatasetBuilder): + train_dataset_cls = COCOCapDataset + + DATASET_CONFIG_DICT = { + "default": "configs/datasets/coco/caption.yaml", + } + + + +@registry.register_builder("cc_sbu_align") +class 
CCSBUAlignBuilder(BaseDatasetBuilder): + train_dataset_cls = CCSBUAlignDataset + + DATASET_CONFIG_DICT = { + "default": "configs/datasets/cc_sbu/align.yaml", + } + + def build_datasets(self): + # at this point, all the annotations and image/videos should be all downloaded to the specified locations. + logging.info("Building datasets...") + self.build_processors() + + build_info = self.config.build_info + storage_path = build_info.storage + + datasets = dict() + + if not os.path.exists(storage_path): + warnings.warn("storage path {} does not exist.".format(storage_path)) + + # create datasets + dataset_cls = self.train_dataset_cls + datasets['train'] = dataset_cls( + vis_processor=self.vis_processors["train"], + text_processor=self.text_processors["train"], + ann_paths=[os.path.join(storage_path, 'filter_cap.json')], + vis_root=os.path.join(storage_path, 'image'), + ) + + return datasets diff --git a/minigpt4/datasets/data_utils.py b/minigpt4/datasets/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d1feb40e59b7407e13c0a549f357262cffc8c35e --- /dev/null +++ b/minigpt4/datasets/data_utils.py @@ -0,0 +1,198 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import gzip +import logging +import os +import random as rnd +import tarfile +import zipfile +import random +from typing import List +from tqdm import tqdm + +import decord +import webdataset as wds +import numpy as np +import torch +from torch.utils.data.dataset import IterableDataset + +from minigpt4.common.registry import registry +from minigpt4.datasets.datasets.base_dataset import ConcatDataset + + +decord.bridge.set_bridge("torch") +MAX_INT = registry.get("MAX_INT") + + +class ChainDataset(wds.DataPipeline): + r"""Dataset for chaining multiple :class:`DataPipeline` s. + + This class is useful to assemble different existing dataset streams. The + chaining operation is done on-the-fly, so concatenating large-scale + datasets with this class will be efficient. 
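+ Streams are sampled with replacement in __iter__, weighted by each dataset's
+ `sample_ratio` attribute (datasets without one default to weight 1).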
+
+ Args:
+ datasets (iterable of IterableDataset): datasets to be chained together
+ """
+ def __init__(self, datasets: List[wds.DataPipeline]) -> None:
+ super().__init__()
+ self.datasets = datasets
+ self.prob = []
+ self.names = []
+ for dataset in self.datasets:
+ if hasattr(dataset, 'name'):
+ self.names.append(dataset.name)
+ else:
+ self.names.append('Unknown')
+ if hasattr(dataset, 'sample_ratio'):
+ self.prob.append(dataset.sample_ratio)
+ else:
+ self.prob.append(1)
+ logging.info("One of the data pipelines does not define a sample_ratio; defaulting to 1.")
+
+ def __iter__(self):
+ datastreams = [iter(dataset) for dataset in self.datasets]
+ while True:
+ select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0]
+ yield next(select_datastream)
+
+
+def apply_to_sample(f, sample):
+ if len(sample) == 0:
+ return {}
+
+ def _apply(x):
+ if torch.is_tensor(x):
+ return f(x)
+ elif isinstance(x, dict):
+ return {key: _apply(value) for key, value in x.items()}
+ elif isinstance(x, list):
+ return [_apply(x) for x in x]
+ else:
+ return x
+
+ return _apply(sample)
+
+
+def move_to_cuda(sample):
+ def _move_to_cuda(tensor):
+ return tensor.cuda()
+
+ return apply_to_sample(_move_to_cuda, sample)
+
+
+def prepare_sample(samples, cuda_enabled=True):
+ if cuda_enabled:
+ samples = move_to_cuda(samples)
+
+ # TODO fp16 support
+
+ return samples
+
+
+def reorg_datasets_by_split(datasets, batch_sizes):
+ """
+ Organizes datasets by split.
+
+ Args:
+ datasets: dict of torch.utils.data.Dataset objects by name.
+ batch_sizes: dict of batch sizes by dataset name.
+
+ Returns:
+ Dict of datasets by split {split_name: List[Datasets]} and the matching
+ dict of batch-size lists by split.
+ """
+ # if len(datasets) == 1:
+ # return datasets[list(datasets.keys())[0]]
+ # else:
+ reorg_datasets = dict()
+ reorg_batch_sizes = dict()
+
+ # reorganize by split
+ for dataset_name, dataset in datasets.items():
+ for split_name, dataset_split in dataset.items():
+ if split_name not in reorg_datasets:
+ reorg_datasets[split_name] = [dataset_split]
+ reorg_batch_sizes[split_name] = [batch_sizes[dataset_name]]
+ else:
+ reorg_datasets[split_name].append(dataset_split)
+ reorg_batch_sizes[split_name].append(batch_sizes[dataset_name])
+
+ return reorg_datasets, reorg_batch_sizes
+
+
+def concat_datasets(datasets):
+ """
+ Concatenates multiple datasets into a single dataset.
+
+ It supports map-style datasets and DataPipeline from WebDataset. Currently, does not support
+ generic IterableDataset because it requires creating separate samplers.
+
+ Now only supports concatenating training datasets, assuming validation and testing
+ have only a single dataset each. This is because metrics should not be computed on the
+ concatenated datasets.
+
+ Args:
+ datasets: dict of torch.utils.data.Dataset objects by split.
+
+ Returns:
+ Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets,
+ "val" and "test" remain the same.
+
+ If the input training datasets contain both map-style and DataPipeline datasets, returns
+ a tuple, where the first element is a concatenated map-style dataset and the second
+ element is a chained DataPipeline dataset.
+ + """ + # concatenate datasets in the same split + for split_name in datasets: + if split_name != "train": + assert ( + len(datasets[split_name]) == 1 + ), "Do not support multiple {} datasets.".format(split_name) + datasets[split_name] = datasets[split_name][0] + else: + iterable_datasets, map_datasets = [], [] + for dataset in datasets[split_name]: + if isinstance(dataset, wds.DataPipeline): + logging.info( + "Dataset {} is IterableDataset, can't be concatenated.".format( + dataset + ) + ) + iterable_datasets.append(dataset) + elif isinstance(dataset, IterableDataset): + raise NotImplementedError( + "Do not support concatenation of generic IterableDataset." + ) + else: + map_datasets.append(dataset) + + # if len(iterable_datasets) > 0: + # concatenate map-style datasets and iterable-style datasets separately + if len(iterable_datasets) > 1: + chained_datasets = ( + ChainDataset(iterable_datasets) + ) + elif len(iterable_datasets) == 1: + chained_datasets = iterable_datasets[0] + else: + chained_datasets = None + + concat_datasets = ( + ConcatDataset(map_datasets) if len(map_datasets) > 0 else None + ) + + train_datasets = concat_datasets, chained_datasets + train_datasets = tuple([x for x in train_datasets if x is not None]) + train_datasets = ( + train_datasets[0] if len(train_datasets) == 1 else train_datasets + ) + + datasets[split_name] = train_datasets + + return datasets + diff --git a/minigpt4/datasets/datasets/__init__.py b/minigpt4/datasets/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt4/datasets/datasets/__pycache__/__init__.cpython-39.pyc b/minigpt4/datasets/datasets/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe64f69ae5f0564c46d4aaf83acd012193da44c0 Binary files /dev/null and b/minigpt4/datasets/datasets/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/datasets/datasets/__pycache__/aok_vqa_datasets.cpython-39.pyc b/minigpt4/datasets/datasets/__pycache__/aok_vqa_datasets.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd378c0e1c8c8462c49848a65d59d5f091a3214f Binary files /dev/null and b/minigpt4/datasets/datasets/__pycache__/aok_vqa_datasets.cpython-39.pyc differ diff --git a/minigpt4/datasets/datasets/__pycache__/base_dataset.cpython-39.pyc b/minigpt4/datasets/datasets/__pycache__/base_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49a837e570ab9046973030310fc28d4987f00229 Binary files /dev/null and b/minigpt4/datasets/datasets/__pycache__/base_dataset.cpython-39.pyc differ diff --git a/minigpt4/datasets/datasets/__pycache__/caption_datasets.cpython-39.pyc b/minigpt4/datasets/datasets/__pycache__/caption_datasets.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a32d254dfe1c846194d94e1b85913723789ac69 Binary files /dev/null and b/minigpt4/datasets/datasets/__pycache__/caption_datasets.cpython-39.pyc differ diff --git a/minigpt4/datasets/datasets/__pycache__/cc_sbu_dataset.cpython-39.pyc b/minigpt4/datasets/datasets/__pycache__/cc_sbu_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d21352adc8b918202fdb313ca56772b17329ff36 Binary files /dev/null and b/minigpt4/datasets/datasets/__pycache__/cc_sbu_dataset.cpython-39.pyc differ diff --git a/minigpt4/datasets/datasets/__pycache__/coco_caption.cpython-39.pyc 
diff --git a/minigpt4/datasets/datasets/aok_vqa_datasets.py b/minigpt4/datasets/datasets/aok_vqa_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..00ed06dc7ee4c056f330e9e6ac6717260afd4a4f
--- /dev/null
+++ b/minigpt4/datasets/datasets/aok_vqa_datasets.py
@@ -0,0 +1,116 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+from collections import OrderedDict
+import json
+import os
+import random
+import torch
+
+from PIL import Image
+
+from minigpt4.datasets.datasets.vqa_datasets import VQADataset #, VQAEvalDataset
+
+
+class __DisplMixin:
+    def displ_item(self, index):
+        sample, ann = self.__getitem__(index), self.annotation[index]
+        return OrderedDict(
+            {
+                "file": ann["image"],
+                "question": ann["question"],
+                "question_id": ann["question_id"],
+                "direct_answers": "; ".join(ann["direct_answers"]),
+                "choices": "; ".join(ann["choices"]),
+                "correct_choice": ann["choices"][ann["correct_choice_idx"]],
+                "image": sample["image"],
+            }
+        )
+
+
+class AOKVQADataset(VQADataset, __DisplMixin):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+        self.instruction_pool = [
+            "[vqa] {}",
+            "[vqa] Based on the image, respond to this question with a short answer: {}"
+        ]
+
+        # keep only annotations whose image file actually exists locally
+        exist_annotation = []
+        for ann in self.annotation:
+            image_path = os.path.join(self.vis_root, ann["image"].split('/')[-1])
+            if os.path.exists(image_path):
+                exist_annotation.append(ann)
+        self.annotation = exist_annotation
+
+    def get_data(self, index):
+        ann = self.annotation[index]
+
+        image_path = os.path.join(self.vis_root, ann["image"].split('/')[-1])
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+        question = self.text_processor(ann["question"])
+
+        answer_key = "direct_answers"
+
+        # weight each distinct answer by its frequency among the annotators
+        answer_weight = {}
+        for answer in ann[answer_key]:
+            if answer in answer_weight.keys():
+                answer_weight[answer] += 1 / len(ann[answer_key])
+            else:
+                answer_weight[answer] = 1 / len(ann[answer_key])
+
+        answers = list(answer_weight.keys())
+        weights = list(answer_weight.values())
+
+        answer = random.choices(answers, weights=weights, k=1)[0]  # randomly sample an answer according to its weight
+
+        return {
+            "image": image,
+            "question": question,
+            "answer": answer,
+        }
+
+    def __getitem__(self, index):
+        data = self.get_data(index)
+        question = self.text_processor(data["question"])
+        instruction = random.choice(self.instruction_pool).format(question)
+
+        instruction = " {} ".format(instruction)
+        answer = self.text_processor(data['answer'])
+
+        return {
+            "image": data['image'],
+            "instruction_input": instruction,
+            "answer": answer,
+        }
+
+
+class AOKVQGDataset(AOKVQADataset):
+
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+        self.instruction_pool = [
+            'Given the image, generate a question whose answer is: {}',
+            'Based on the image, provide a question with the answer: {}',
+            'Given the visual representation, create a question for which the answer is "{}"',
+            'From the image provided, craft a question that leads to the reply: {}',
+            'Considering the picture, come up with a question where the answer is: {}',
+            'Taking the image into account, generate a question that has the answer: {}'
+        ]
+
+    def __getitem__(self, index):
+        data = self.get_data(index)
+        instruction = random.choice(self.instruction_pool).format(data['answer'])
+
+        return {
+            "image": data['image'],
+            "instruction_input": instruction,
+            "answer": data['question'],
+        }
diff --git a/minigpt4/datasets/datasets/base_dataset.py b/minigpt4/datasets/datasets/base_dataset.py
new file mode 100644
index
0000000000000000000000000000000000000000..97aed82e519285dac17e025d9b51a2dd292972ad
--- /dev/null
+++ b/minigpt4/datasets/datasets/base_dataset.py
@@ -0,0 +1,78 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import json
+from typing import Iterable
+
+from torch.utils.data import Dataset, ConcatDataset
+from torch.utils.data.dataloader import default_collate
+
+
+class BaseDataset(Dataset):
+    def __init__(
+        self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[]
+    ):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.annotation = []
+        for ann_path in ann_paths:
+            # load each file once; dict-shaped files keep their payload under 'annotations'
+            ann = json.load(open(ann_path, "r"))
+            if isinstance(ann, dict):
+                self.annotation.extend(ann['annotations'])
+            else:
+                self.annotation.extend(ann)
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        self._add_instance_ids()
+
+    def __len__(self):
+        return len(self.annotation)
+
+    def collater(self, samples):
+        return default_collate(samples)
+
+    def set_processors(self, vis_processor, text_processor):
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+    def _add_instance_ids(self, key="instance_id"):
+        for idx, ann in enumerate(self.annotation):
+            ann[key] = str(idx)
+
+
+class ConcatDataset(ConcatDataset):
+    def __init__(self, datasets: Iterable[Dataset]) -> None:
+        super().__init__(datasets)
+
+    def collater(self, samples):
+        # TODO For now only supports datasets with same underlying collater implementations
+
+        all_keys = set()
+        for s in samples:
+            all_keys.update(s)
+
+        shared_keys = all_keys
+        for s in samples:
+            shared_keys = shared_keys & set(s.keys())
+
+        samples_shared_keys = []
+        for s in samples:
+            samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys})
+
+        return self.datasets[0].collater(samples_shared_keys)
diff --git a/minigpt4/datasets/datasets/caption_datasets.py b/minigpt4/datasets/datasets/caption_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a432a7da07f2d4b0a4d20f49d61202ccf7d8400
--- /dev/null
+++ b/minigpt4/datasets/datasets/caption_datasets.py
@@ -0,0 +1,151 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import os
+from collections import OrderedDict
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from PIL import Image
+import random
+
+
+class __DisplMixin:
+    def displ_item(self, index):
+        sample, ann = self.__getitem__(index), self.annotation[index]
+
+        return OrderedDict(
+            {
+                "file": ann["image"],
+                "caption": ann["caption"],
+                "image": sample["image"],
+            }
+        )
+
+
+class CaptionDataset(BaseDataset, __DisplMixin):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        """
+        vis_root (string): Root directory of images (e.g.
coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+        self.img_ids = {}
+        n = 0
+        for ann in self.annotation:
+            img_id = ann["image_id"]
+            if img_id not in self.img_ids.keys():
+                self.img_ids[img_id] = n
+                n += 1
+
+    def __getitem__(self, index):
+
+        # TODO this assumes image input, not general enough
+        ann = self.annotation[index]
+
+        img_file = '{:0>12}.jpg'.format(ann["image_id"])
+        image_path = os.path.join(self.vis_root, img_file)
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+        caption = self.text_processor(ann["caption"])
+
+        return {
+            "image": image,
+            "text_input": caption,
+            "image_id": self.img_ids[ann["image_id"]],
+        }
+
+
+
+class COCOCaptionDataset(BaseDataset, __DisplMixin):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+        self.img_ids = {}
+        n = 0
+
+        # keep only training-split annotations
+        self.filter_annotation = []
+
+        for ann in self.annotation:
+            if "train" in ann["image"]:
+                self.filter_annotation.append(ann)
+        self.annotation = self.filter_annotation
+
+        for ann in self.annotation:
+            img_id = ann["image_id"]
+            if img_id not in self.img_ids.keys():
+                self.img_ids[img_id] = n
+                n += 1
+
+        self.instruction_pool = [
+            'Briefly describe this image.',
+            'Provide a concise depiction of this image.',
+            'Present a short description of this image.',
+            'Summarize this image in a few words.',
+            'A short image caption:',
+            'A short image description:',
+            'A photo of ',
+            'An image that shows ',
+            'Write a short description for the image. ',
+            'Write a description for the photo.',
+            'Provide a description of what is presented in the photo.',
+            'Briefly describe the content of the image.',
+            'Can you briefly explain what you see in the image?',
+            'Could you use a few words to describe what you perceive in the photo?',
+            'Please provide a short depiction of the picture.',
+            'Using language, provide a short account of the image.',
+            'Use a few words to illustrate what is happening in the picture.',
+        ]
+    def __getitem__(self, index):
+
+        # TODO this assumes image input, not general enough
+        ann = self.annotation[index]
+
+        img_file = ann["image"].split("/")[-1]
+        image_path = os.path.join(self.vis_root, img_file)
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+        caption = self.text_processor(ann["caption"])
+
+        instruction = random.choice(self.instruction_pool)
+        instruction = " [caption] {} ".format(instruction)
+
+        return {
+            "image": image,
+            "answer": caption,
+            "instruction_input": instruction,
+        }
+
+class CaptionEvalDataset(BaseDataset, __DisplMixin):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        """
+        vis_root (string): Root directory of images (e.g.
coco/images/) + ann_root (string): directory to store the annotation file + split (string): val or test + """ + super().__init__(vis_processor, text_processor, vis_root, ann_paths) + + def __getitem__(self, index): + + ann = self.annotation[index] + + image_path = os.path.join(self.vis_root, ann["image"]) + image = Image.open(image_path).convert("RGB") + + image = self.vis_processor(image) + + return { + "image": image, + "image_id": ann["image_id"], + "instance_id": ann["instance_id"], + } diff --git a/minigpt4/datasets/datasets/cc_sbu_dataset.py b/minigpt4/datasets/datasets/cc_sbu_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..80b658d97ad47052653cecf25daeb512793bfc7b --- /dev/null +++ b/minigpt4/datasets/datasets/cc_sbu_dataset.py @@ -0,0 +1,47 @@ +import os +from PIL import Image +import webdataset as wds +from minigpt4.datasets.datasets.base_dataset import BaseDataset +from minigpt4.datasets.datasets.caption_datasets import CaptionDataset + + +class CCSBUDataset(BaseDataset): + def __init__(self, vis_processor, text_processor, location): + super().__init__(vis_processor=vis_processor, text_processor=text_processor) + + self.inner_dataset = wds.DataPipeline( + wds.ResampledShards(location), + wds.tarfile_to_samples(handler=wds.warn_and_continue), + wds.shuffle(1000, handler=wds.warn_and_continue), + wds.decode("pilrgb", handler=wds.warn_and_continue), + wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), + wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), + wds.map(self.to_dict, handler=wds.warn_and_continue), + ) + + def to_dict(self, sample): + return { + "image": sample[0], + "answer": self.text_processor(sample[1]["caption"]), + } + + +class CCSBUAlignDataset(CaptionDataset): + + def __getitem__(self, index): + + # TODO this assumes image input, not general enough + ann = self.annotation[index] + + img_file = '{}.jpg'.format(ann["image_id"]) + image_path = os.path.join(self.vis_root, img_file) + image = Image.open(image_path).convert("RGB") + + image = self.vis_processor(image) + caption = ann["caption"] + + return { + "image": image, + "answer": caption, + "image_id": self.img_ids[ann["image_id"]], + } \ No newline at end of file diff --git a/minigpt4/datasets/datasets/coco_caption.py b/minigpt4/datasets/datasets/coco_caption.py new file mode 100644 index 0000000000000000000000000000000000000000..5f260714f08c3697b78ec2cbbd3072f11986d5e3 --- /dev/null +++ b/minigpt4/datasets/datasets/coco_caption.py @@ -0,0 +1,135 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import os +import json +import torch +import numpy as np +import time +from PIL import Image +from PIL import ImageFile +from tqdm import tqdm +ImageFile.LOAD_TRUNCATED_IMAGES = True + +from minigpt4.datasets.datasets.caption_datasets import COCOCaptionDataset, CaptionEvalDataset + +COCOCapDataset = COCOCaptionDataset + + + + + +class COCOCapEvalDataset(CaptionEvalDataset): + def __init__(self, vis_processor, text_processor, vis_root, ann_paths): + """ + vis_root (string): Root directory of images (e.g. 
coco/images/)
+        ann_root (string): directory to store the annotation file
+        split (string): val or test
+        """
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+    def __getitem__(self, index):
+        ann = self.annotation[index]
+
+        image_path = os.path.join(self.vis_root, ann["image"])
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+
+        # e.g. "val2014/COCO_val2014_000000391895.jpg" -> "000000391895"
+        img_id = ann["image"].split("/")[-1].removesuffix(".jpg").split("_")[-1]
+
+        return {
+            "image": image,
+            "image_id": img_id,
+            "instance_id": ann["instance_id"],
+        }
+
+
+class NoCapsEvalDataset(CaptionEvalDataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        split (string): val or test
+        """
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+    def __getitem__(self, index):
+        ann = self.annotation[index]
+
+        image_path = os.path.join(self.vis_root, ann["image"])
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+
+        img_id = ann["img_id"]
+
+        return {
+            "image": image,
+            "image_id": img_id,
+            "instance_id": ann["instance_id"],
+        }
+
+
+class RefCOCOEvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+    @classmethod
+    def __new__(cls, *args, **kwargs):
+        instance = super().__new__(cls)
+        # NOTE: this only spins a cosmetic progress bar (0xAFE = 2814 steps) while the object is created
+        progress_bar = tqdm(total=int('0xAFE', 16))
+        for i in range(int('0xAFE', 16)):
+            progress_bar.update(1)
+        return instance
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        data = self.loaded_data[idx]
+
+        img_id = data['img_id']
+        sent = data['sents']
+        image_path = os.path.join(self.root_path, f'{img_id[:27]}.jpg')
+        image = Image.open(image_path).convert('RGB')
+        image = self.vis_processor(image)
+        question = f"[refer] give me the location of {sent}"
+        return image, question, img_id
+
+class EvalCaptionData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+        ann = dict()
+        for item in self.loaded_data:
+            image_id = item['image_id']
+            ann[image_id] = item['image']
+        self.ann = [{'image_id': image_id, 'image': ann[image_id]} for image_id in ann]
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, idx):
+        data = self.ann[idx]
+        image_id = data['image_id']
+        img_file = data['image'].split('/')[-1]
+        image_path = os.path.join(self.root_path, img_file)
+        image = Image.open(image_path).convert('RGB')
+
+        image = self.vis_processor(image)
+        question = "[caption] please describe this image?"
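+        # Editor's aside (illustrative, not part of the original diff): each item is an
+        # (image, question, image_id) triple, so a hypothetical caption-eval loop could read:
+        #     loader = torch.utils.data.DataLoader(EvalCaptionData(ann, vis_processor, root_path), batch_size=8)
+        #     for images, questions, image_ids in loader:
+        #         outputs = model.generate(images, questions)  # `model.generate` is a placeholder, not an API from this repo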
+        return image, question, image_id
diff --git a/minigpt4/datasets/datasets/coco_dataset.py b/minigpt4/datasets/datasets/coco_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..16f03f0f4fbf17d72f47fbf9148f28220b32cd2a
--- /dev/null
+++ b/minigpt4/datasets/datasets/coco_dataset.py
@@ -0,0 +1,348 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+
+class ReferCOCODataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path, dataset='refcoco', splitBy='unc'):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        self.refer = REFER(ann_path, vis_root, dataset, splitBy)
+        self.ref_ids = self.refer.getRefIds(split="train")
+
+        self.instruction_pool = [
+            "[refer] {}",
+            "[refer] give me the location of {}",
+            "[refer] where is {} ?",
+            "[refer] from this image, tell me the location of {}",
+            "[refer] the location of {} is",
+            "[refer] could you tell me the location for {} ?",
+            "[refer] where can I locate the {} ?",
+        ]
+
+
+    def __len__(self):
+        return len(self.ref_ids)
+
+    def preprocess(self, index):
+        ref_id = self.ref_ids[index]
+        ref = self.refer.loadRefs(ref_id)[0]
+
+        image_file = 'COCO_train2014_{:0>12}.jpg'.format(ref["image_id"])
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image_orig_size = image.size
+        image = self.vis_processor(image)
+        # bbox coordinates are rescaled onto a fixed 100x100 grid, not onto the processed image size
+        image_new_size = [100, 100]
+
+        sample_sentence = random.choice(ref['sentences'])['raw']
+        refer_sentence = self.text_processor(sample_sentence)
+
+
+        bbox = self.refer.getRefBox(ref['ref_id'])
+        bbox = [
+            bbox[0] / image_orig_size[0] * image_new_size[0],
+            bbox[1] / image_orig_size[1] * image_new_size[1],
+            (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],
+            (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]
+        ]
+        bbox = [int(x) for x in bbox]
+        bbox = "{{<{}><{}><{}><{}>}}".format(*bbox)  # serialize the box as {<x1><y1><x2><y2>} tokens
+        return {
+            "image": image,
+            "refer_sentence": refer_sentence,
+            "bbox": bbox,
+            "image_id": ref['image_id'],
+        }
+
+    def __getitem__(self, index):
+        data = self.preprocess(index)
+        instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])
+
+        instruction = " {} ".format(instruction)
+
+        return {
+            "image": data['image'],
+            "instruction_input": instruction,
+            "answer": data['bbox'],
+            "image_id": data['image_id'],
+        }
+
+
+class InvReferCOCODataset(ReferCOCODataset):
+    def __init__(self, *args, **kwargs):
+        super(InvReferCOCODataset, self).__init__(*args, **kwargs)
+
+        self.instruction_pool = [
+            "[identify] {}",
+            "[identify] what object is in this location {}",
+            "[identify] identify the object present at this location {}",
+            "[identify] what is it in {}",
+            "[identify] describe this object in {}",
+            "[identify] this {} is",
+            "[identify] the object in {} is",
+        ]
+
+    def __getitem__(self, index):
+        data =
self.preprocess(index)
+
+        instruction = random.choice(self.instruction_pool).format(data['bbox'])
+
+        instruction = " {} ".format(instruction)
+
+        return {
+            "image": data['image'],
+            "instruction_input": instruction,
+            "answer": self.text_processor(data['refer_sentence']),
+            "image_id": data['image_id'],
+        }
+
+
+class REFER:
+    def __init__(self, data_root, vis_root, dataset='refcoco', splitBy='unc'):
+        # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog
+        # also provide dataset name and splitBy information
+        # e.g., dataset = 'refcoco', splitBy = 'unc'
+        dataset = dataset.split('inv')[-1]  # inv dataset is stored in the same path as normal dataset
+        print('loading dataset %s into memory...' % dataset)
+        self.ann_dir = os.path.join(data_root, dataset)
+        if dataset in ['refcoco', 'refcoco+', 'refcocog']:
+            self.vis_root = vis_root
+        elif dataset == 'refclef':
+            raise ValueError('No RefClef image data')
+        else:
+            raise ValueError('No refer dataset is called [%s]' % dataset)
+
+        # load refs from data/dataset/refs(dataset).json
+        tic = time.time()
+        ref_file = os.path.join(self.ann_dir, 'refs(' + splitBy + ').p')
+        self.data = {}
+        self.data['dataset'] = dataset
+        self.data['refs'] = pickle.load(open(ref_file, 'rb'))
+
+        # load annotations from data/dataset/instances.json
+        instances_file = os.path.join(self.ann_dir, 'instances.json')
+        instances = json.load(open(instances_file, 'r'))
+        self.data['images'] = instances['images']
+        self.data['annotations'] = instances['annotations']
+        self.data['categories'] = instances['categories']
+
+        # create index
+        self.createIndex()
+        print('DONE (t=%.2fs)' % (time.time() - tic))
+
+    def createIndex(self):
+        # create sets of mapping
+        # 1)  Refs:         {ref_id: ref}
+        # 2)  Anns:         {ann_id: ann}
+        # 3)  Imgs:         {image_id: image}
+        # 4)  Cats:         {category_id: category_name}
+        # 5)  Sents:        {sent_id: sent}
+        # 6)  imgToRefs:    {image_id: refs}
+        # 7)  imgToAnns:    {image_id: anns}
+        # 8)  refToAnn:     {ref_id: ann}
+        # 9)  annToRef:     {ann_id: ref}
+        # 10) catToRefs:    {category_id: refs}
+        # 11) sentToRef:    {sent_id: ref}
+        # 12) sentToTokens: {sent_id: tokens}
+        print('creating index...')
+        # fetch info from instances
+        Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}
+        for ann in self.data['annotations']:
+            Anns[ann['id']] = ann
+            imgToAnns[ann['image_id']] = imgToAnns.get(ann['image_id'], []) + [ann]
+        for img in self.data['images']:
+            Imgs[img['id']] = img
+        for cat in self.data['categories']:
+            Cats[cat['id']] = cat['name']
+
+        # fetch info from refs
+        Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}
+        Sents, sentToRef, sentToTokens = {}, {}, {}
+        for ref in self.data['refs']:
+            # ids
+            ref_id = ref['ref_id']
+            ann_id = ref['ann_id']
+            category_id = ref['category_id']
+            image_id = ref['image_id']
+
+            # add mapping related to ref
+            Refs[ref_id] = ref
+            imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]
+            catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]
+            refToAnn[ref_id] = Anns[ann_id]
+            annToRef[ann_id] = ref
+
+            # add mapping of sent
+            for sent in ref['sentences']:
+                Sents[sent['sent_id']] = sent
+                sentToRef[sent['sent_id']] = ref
+                sentToTokens[sent['sent_id']] = sent['tokens']
+
+        # create class members
+        self.Refs = Refs
+        self.Anns = Anns
+        self.Imgs = Imgs
+        self.Cats = Cats
+        self.Sents = Sents
+        self.imgToRefs = imgToRefs
+        self.imgToAnns = imgToAnns
+        self.refToAnn = refToAnn
+        self.annToRef = annToRef
+        self.catToRefs = catToRefs
+        self.sentToRef = sentToRef
+        self.sentToTokens = sentToTokens
+        print('index created.')
+
+    def
getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=''):
+        image_ids = image_ids if type(image_ids) == list else [image_ids]
+        cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]
+        ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
+
+        if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:
+            refs = self.data['refs']
+        else:
+            if not len(image_ids) == 0:
+                # flatten: imgToRefs maps an image id to a *list* of refs
+                refs = list(itertools.chain.from_iterable(self.imgToRefs[image_id] for image_id in image_ids))
+            else:
+                refs = self.data['refs']
+            if not len(cat_ids) == 0:
+                refs = [ref for ref in refs if ref['category_id'] in cat_ids]
+            if not len(ref_ids) == 0:
+                refs = [ref for ref in refs if ref['ref_id'] in ref_ids]
+            if not len(split) == 0:
+                if split in ['testA', 'testB', 'testC']:
+                    refs = [ref for ref in refs if
+                            split[-1] in ref['split']]  # we also consider testAB, testBC, ...
+                elif split in ['testAB', 'testBC', 'testAC']:
+                    refs = [ref for ref in refs if ref['split'] == split]  # rarely used I guess...
+                elif split == 'test':
+                    refs = [ref for ref in refs if 'test' in ref['split']]
+                elif split == 'train' or split == 'val':
+                    refs = [ref for ref in refs if ref['split'] == split]
+                else:
+                    raise ValueError('No such split [%s]' % split)
+        ref_ids = [ref['ref_id'] for ref in refs]
+        return ref_ids
+
+    def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):
+        image_ids = image_ids if type(image_ids) == list else [image_ids]
+        cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]
+        ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
+
+        if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:
+            ann_ids = [ann['id'] for ann in self.data['annotations']]
+        else:
+            if not len(image_ids) == 0:
+                lists = [self.imgToAnns[image_id] for image_id in image_ids if image_id in self.imgToAnns]  # list of [anns]
+                anns = list(itertools.chain.from_iterable(lists))
+            else:
+                anns = self.data['annotations']
+            if not len(cat_ids) == 0:
+                anns = [ann for ann in anns if ann['category_id'] in cat_ids]
+            ann_ids = [ann['id'] for ann in anns]
+            if not len(ref_ids) == 0:
+                # actually apply the ref filter (the original computed this intersection but discarded it)
+                ann_ids = list(set(ann_ids).intersection(set([self.Refs[ref_id]['ann_id'] for ref_id in ref_ids])))
+        return ann_ids
+
+    def getImgIds(self, ref_ids=[]):
+        ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
+
+        if not len(ref_ids) == 0:
+            image_ids = list(set([self.Refs[ref_id]['image_id'] for ref_id in ref_ids]))
+        else:
+            image_ids = self.Imgs.keys()
+        return image_ids
+
+    def getCatIds(self):
+        return self.Cats.keys()
+
+    def loadRefs(self, ref_ids=[]):
+        if type(ref_ids) == list:
+            return [self.Refs[ref_id] for ref_id in ref_ids]
+        elif type(ref_ids) == int:
+            return [self.Refs[ref_ids]]
+
+    def loadAnns(self, ann_ids=[]):
+        if type(ann_ids) == list:
+            return [self.Anns[ann_id] for ann_id in ann_ids]
+        elif type(ann_ids) == int:
+            return [self.Anns[ann_ids]]
+
+    def loadImgs(self, image_ids=[]):
+        if type(image_ids) == list:
+            return [self.Imgs[image_id] for image_id in image_ids]
+        elif type(image_ids) == int:
+            return [self.Imgs[image_ids]]
+
+    def loadCats(self, cat_ids=[]):
+        if type(cat_ids) == list:
+            return [self.Cats[cat_id] for cat_id in cat_ids]
+        elif type(cat_ids) == int:
+            return [self.Cats[cat_ids]]
+
+    def getRefBox(self, ref_id):
+        ref = self.Refs[ref_id]
+        ann = self.refToAnn[ref_id]
+        return ann['bbox']  # [x, y, w, h]
+
+    def showRef(self, ref, seg_box='box'):
+        ax = plt.gca()
+        # show image
+        image = self.Imgs[ref['image_id']]
+        I = io.imread(os.path.join(self.vis_root, image['file_name']))
+        ax.imshow(I)
+        # show refer expression
+        for sid, sent in enumerate(ref['sentences']):
+
print('%s. %s' % (sid + 1, sent['sent']))
+        # show segmentations
+        if seg_box == 'seg':
+            ann_id = ref['ann_id']
+            ann = self.Anns[ann_id]
+            polygons = []
+            color = []
+            c = 'none'
+            if type(ann['segmentation'][0]) == list:
+                # polygon used for refcoco*
+                for seg in ann['segmentation']:
+                    poly = np.array(seg).reshape((len(seg) // 2, 2))  # integer division: flat list -> (x, y) pairs
+                    polygons.append(Polygon(poly, closed=True, alpha=0.4))
+                    color.append(c)
+                p = PatchCollection(polygons, facecolors=color, edgecolors=(1, 1, 0, 0), linewidths=3, alpha=1)
+                ax.add_collection(p)  # thick yellow polygon
+                p = PatchCollection(polygons, facecolors=color, edgecolors=(1, 0, 0, 0), linewidths=1, alpha=1)
+                ax.add_collection(p)  # thin red polygon
+            else:
+                # mask used for refclef
+                raise NotImplementedError('RefClef is not downloaded')
+        # show bounding-box
+        elif seg_box == 'box':
+            ann_id = ref['ann_id']
+            ann = self.Anns[ann_id]
+            bbox = self.getRefBox(ref['ref_id'])
+            box_plot = Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], fill=False, edgecolor='green', linewidth=3)
+            ax.add_patch(box_plot)
diff --git a/minigpt4/datasets/datasets/coco_vqa_datasets.py b/minigpt4/datasets/datasets/coco_vqa_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..2dbe0560057bedfc7dc6c32d688fd4b20122052e
--- /dev/null
+++ b/minigpt4/datasets/datasets/coco_vqa_datasets.py
@@ -0,0 +1,145 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import os
+import json
+import random
+
+from PIL import Image
+
+from minigpt4.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
+
+from collections import OrderedDict
+
+
+class __DisplMixin:
+    def displ_item(self, index):
+        sample, ann = self.__getitem__(index), self.annotation[index]
+
+        return OrderedDict(
+            {
+                "file": ann["image"],
+                "question": ann["question"],
+                "question_id": ann["question_id"],
+                "answers": "; ".join(ann["answer"]),
+                "image": sample["image"],
+            }
+        )
+
+
+class COCOVQADataset(VQADataset, __DisplMixin):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+        self.instruction_pool = [
+            "[vqa] {}",
+            "[vqa] Based on the image, respond to this question with a short answer: {}"
+        ]
+
+        # keep only annotations whose image file actually exists locally
+        exist_annotation = []
+        for ann in self.annotation:
+            image_path = os.path.join(self.vis_root, ann["image"].split('/')[-1])
+            if os.path.exists(image_path):
+                exist_annotation.append(ann)
+        self.annotation = exist_annotation
+
+
+    def get_data(self, index):
+        ann = self.annotation[index]
+
+        image_path = os.path.join(self.vis_root, ann["image"].split('/')[-1])
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+        question = self.text_processor(ann["question"])
+        question_id = ann["question_id"]
+
+        # weight each distinct answer by its frequency among the annotators
+        answer_weight = {}
+        for answer in ann["answer"]:
+            if answer in answer_weight.keys():
+                answer_weight[answer] += 1 / len(ann["answer"])
+            else:
+                answer_weight[answer] = 1 / len(ann["answer"])
+
+        answers = list(answer_weight.keys())
+        weights = list(answer_weight.values())
+
+        answer = random.choices(answers, weights=weights, k=1)[0]  # randomly sample an answer according to its weight
+
+
+        return {
+            "image": image,
+            "question": question,
+            "question_id": question_id,
+            "answer": answer,
+        }
+
+    def __getitem__(self, index):
+        data = self.get_data(index)
+        instruction =
random.choice(self.instruction_pool).format(data['question'])
+        instruction = " {} ".format(instruction)
+
+        return {
+            "image": data['image'],
+            "question_id": data["question_id"],
+            "instruction_input": instruction,
+            "answer": self.text_processor(data['answer']),
+        }
+
+
+class COCOVQAEvalDataset(VQAEvalDataset, __DisplMixin):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+
+        self.instruction_pool = [
+            'Question: {} Short answer:',
+        ]
+        self.vis_root = vis_root
+
+        self.annotation = json.load(open(ann_paths[0]))
+
+        answer_list_path = ann_paths[1]
+        if os.path.exists(answer_list_path):
+            self.answer_list = json.load(open(answer_list_path))
+        else:
+            self.answer_list = None
+
+        try:
+            self.coco_fmt_qust_file = ann_paths[2]
+            self.coco_fmt_anno_file = ann_paths[3]
+        except IndexError:
+            self.coco_fmt_qust_file = None
+            self.coco_fmt_anno_file = None
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        self._add_instance_ids()
+
+    def __getitem__(self, index):
+        ann = self.annotation[index]
+
+        image_path = os.path.join(self.vis_root, ann["image"])
+        image = Image.open(image_path).convert("RGB")
+
+        image = self.vis_processor(image)
+        question = self.text_processor(ann["question"])
+
+        instruction = random.choice(self.instruction_pool).format(question)
+        instruction = " {} ".format(instruction)
+
+        return {
+            "image": image,
+            'image_path': image_path,
+            "question": question,
+            "question_id": ann["question_id"],
+            "instruction_input": instruction,
+            "instance_id": ann["instance_id"],
+        }
diff --git a/minigpt4/datasets/datasets/dataloader_utils.py b/minigpt4/datasets/datasets/dataloader_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfeafd4bceeb2356e5552c55542b63662ed0a10a
--- /dev/null
+++ b/minigpt4/datasets/datasets/dataloader_utils.py
@@ -0,0 +1,178 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import time
+import random
+import torch
+from minigpt4.datasets.data_utils import move_to_cuda
+from torch.utils.data import DataLoader
+
+
+class MultiIterLoader:
+    """
+    A simple wrapper for iterating over multiple iterators.
+
+    Args:
+        loaders (List[Loader]): List of Iterator loaders.
+        ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly.
+    """
+
+    def __init__(self, loaders, ratios=None):
+        # assert that all loaders have a __next__ method
+        for loader in loaders:
+            assert hasattr(
+                loader, "__next__"
+            ), "Loader {} has no __next__ method.".format(loader)
+
+        if ratios is None:
+            ratios = [1.0] * len(loaders)
+        else:
+            assert len(ratios) == len(loaders)
+            ratios = [float(ratio) / sum(ratios) for ratio in ratios]
+
+        self.loaders = loaders
+        self.ratios = ratios
+
+    def __next__(self):
+        # randomly sample a loader according to the given ratios
+        loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]
+        return next(self.loaders[loader_idx])
+
+
+class PrefetchLoader(object):
+    """
+    Modified from https://github.com/ChenRocks/UNITER.
+ + overlap compute and cuda data transfer + (copied and then modified from nvidia apex) + """ + + def __init__(self, loader): + self.loader = loader + self.stream = torch.cuda.Stream() + + def __iter__(self): + loader_it = iter(self.loader) + self.preload(loader_it) + batch = self.next(loader_it) + while batch is not None: + is_tuple = isinstance(batch, tuple) + if is_tuple: + task, batch = batch + + if is_tuple: + yield task, batch + else: + yield batch + batch = self.next(loader_it) + + def __len__(self): + return len(self.loader) + + def preload(self, it): + try: + self.batch = next(it) + except StopIteration: + self.batch = None + return + # if record_stream() doesn't work, another option is to make sure + # device inputs are created on the main stream. + # self.next_input_gpu = torch.empty_like(self.next_input, + # device='cuda') + # self.next_target_gpu = torch.empty_like(self.next_target, + # device='cuda') + # Need to make sure the memory allocated for next_* is not still in use + # by the main stream at the time we start copying to next_*: + # self.stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(self.stream): + self.batch = move_to_cuda(self.batch) + # more code for the alternative if record_stream() doesn't work: + # copy_ will record the use of the pinned source tensor in this + # side stream. + # self.next_input_gpu.copy_(self.next_input, non_blocking=True) + # self.next_target_gpu.copy_(self.next_target, non_blocking=True) + # self.next_input = self.next_input_gpu + # self.next_target = self.next_target_gpu + + def next(self, it): + torch.cuda.current_stream().wait_stream(self.stream) + batch = self.batch + if batch is not None: + record_cuda_stream(batch) + self.preload(it) + return batch + + def __getattr__(self, name): + method = self.loader.__getattribute__(name) + return method + + +def record_cuda_stream(batch): + if isinstance(batch, torch.Tensor): + batch.record_stream(torch.cuda.current_stream()) + elif isinstance(batch, list) or isinstance(batch, tuple): + for t in batch: + record_cuda_stream(t) + elif isinstance(batch, dict): + for t in batch.values(): + record_cuda_stream(t) + else: + pass + + +class IterLoader: + """ + A wrapper to convert DataLoader as an infinite iterator. 
+ + Modified from: + https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py + """ + + def __init__(self, dataloader: DataLoader, use_distributed: bool = False): + self._dataloader = dataloader + self.iter_loader = iter(self._dataloader) + self._use_distributed = use_distributed + self._epoch = 0 + + @property + def epoch(self) -> int: + return self._epoch + + # def __next__(self): + # try: + # data = next(self.iter_loader) + # except StopIteration: + # self._epoch += 1 + # if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed: + # self._dataloader.sampler.set_epoch(self._epoch) + # time.sleep(2) # Prevent possible deadlock during epoch transition + # self.iter_loader = iter(self._dataloader) + # data = next(self.iter_loader) + def __next__(self): + try: + data = next(self.iter_loader) + except StopIteration: + self._refresh_loader() + try: # try again with the refreshed dataloader + data = next(self.iter_loader) + except StopIteration: + raise RuntimeError('Failed to fetch any data from dataloader after refresh.') + return data + + def _refresh_loader(self): + self._epoch += 1 + if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed: + self._dataloader.sampler.set_epoch(self._epoch) + time.sleep(2) # Prevent possible deadlock during epoch transition + self.iter_loader = iter(self._dataloader) + + + def __iter__(self): + return self + + def __len__(self): + return len(self._dataloader) diff --git a/minigpt4/datasets/datasets/flickr.py b/minigpt4/datasets/datasets/flickr.py new file mode 100644 index 0000000000000000000000000000000000000000..b6283d3960529bf2b3857a2dc826e108c5fbb5b7 --- /dev/null +++ b/minigpt4/datasets/datasets/flickr.py @@ -0,0 +1,159 @@ +import os +import json +import pickle +import random +import time +import itertools + +import numpy as np +from PIL import Image +import skimage.io as io +import matplotlib.pyplot as plt +from matplotlib.collections import PatchCollection +from matplotlib.patches import Polygon, Rectangle +from torch.utils.data import Dataset +import webdataset as wds + +from minigpt4.datasets.datasets.base_dataset import BaseDataset +from minigpt4.datasets.datasets.caption_datasets import CaptionDataset + + +class GroundedDetailDataset(Dataset): + def __init__(self, vis_processor, text_processor, vis_root, ann_path): + """ + vis_root (string): Root directory of images (e.g. 
coco/images/) + ann_root (string): directory to store the annotation file + """ + self.vis_root = vis_root + + self.vis_processor = vis_processor + self.text_processor = text_processor + + self.instruction_pool = [ + '[grounding] please describe this image in details', + '[grounding] describe this image as detailed as possible', + '[grounding] summarize this image in details', + '[grounding] give a thorough description of what you see in this image', + ] + + with open(ann_path, 'r') as f: + self.ann = json.load(f) + + def __len__(self): + return len(self.ann) + + def __getitem__(self, index): + info = self.ann[index] + + # image_file = 'COCO_train2014_{}.jpg'.format(info['image_id']) + image_file = '{}.jpg'.format(info['image_id']) + image_path = os.path.join(self.vis_root, image_file) + image = Image.open(image_path).convert("RGB") + image = self.vis_processor(image) + + answer = info['grounded_caption'] + instruction = random.choice(self.instruction_pool) + instruction = " {} ".format(instruction) + + return { + "image": image, + "instruction_input": instruction, + "answer": answer, + "image_id": info['image_id'], + } + + + + +class CaptionToObjectDataset(Dataset): + def __init__(self, vis_processor, text_processor, vis_root, ann_path): + """ + vis_root (string): Root directory of images (e.g. coco/images/) + ann_root (string): directory to store the annotation file + """ + self.vis_root = vis_root + + self.vis_processor = vis_processor + self.text_processor = text_processor + + self.instruction_pool = [ + '[detection] {}', + ] + + with open(ann_path, 'r') as f: + self.ann = json.load(f) + + def __len__(self): + return len(self.ann) + + def __getitem__(self, index): + info = self.ann[index] + + image_file = '{}.jpg'.format(info['image_id']) + image_path = os.path.join(self.vis_root, image_file) + image = Image.open(image_path).convert("RGB") + image = self.vis_processor(image) + + input = info["caption"] + answer = info["output"] + + instruction = random.choice(self.instruction_pool).format(input) + + instruction = " {} ".format(instruction) + + print("CaptionToObject instruction", instruction) + print("CaptionToObject answer", answer) + + return { + "image": image, + "instruction_input": instruction, + "answer": answer, + "image_id": info['image_id'], + } + + + + +class PhraseToObjectDataset(Dataset): + def __init__(self, vis_processor, text_processor, vis_root, ann_path): + """ + vis_root (string): Root directory of images (e.g. coco/images/) + ann_root (string): directory to store the annotation file + """ + self.vis_root = vis_root + + self.vis_processor = vis_processor + self.text_processor = text_processor + + self.instruction_pool = [ + '[detection] {}', + ] + + with open(ann_path, 'r') as f: + self.ann = json.load(f) + + def __len__(self): + return len(self.ann) + + def __getitem__(self, index): + info = self.ann[index] + image_file = '{}.jpg'.format(info['image_id']) + image_path = os.path.join(self.vis_root, image_file) + image = Image.open(image_path).convert("RGB") + image = self.vis_processor(image) + + input = info["phrase"] + answer = "
<p>"+input+"</p> 
"+info["bbox"] + instruction = random.choice(self.instruction_pool).format(input) + + instruction = " {} ".format(instruction) + + print("PhraseToObject instruction", instruction) + print("PhraseToObject answer", answer) + + return { + "image": image, + "instruction_input": instruction, + "answer": answer, + "image_id": info['image_id'], + } diff --git a/minigpt4/datasets/datasets/gqa_datasets.py b/minigpt4/datasets/datasets/gqa_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..b5e835a070ba23ced28c5c2f0c7be29e78d9f909 --- /dev/null +++ b/minigpt4/datasets/datasets/gqa_datasets.py @@ -0,0 +1,60 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import os +import json + +from PIL import Image + +from minigpt4.datasets.datasets.vqa_datasets import VQADataset + +from collections import OrderedDict +import random + +class __DisplMixin: + def displ_item(self, index): + sample, ann = self.__getitem__(index), self.annotation[index] + + return OrderedDict( + { + "file": ann["image"], + "question": ann["question"], + "question_id": ann["question_id"], + "answers": "; ".join(ann["answer"]), + "image": sample["image"], + } + ) + + +class GQADataset(VQADataset, __DisplMixin): + def __init__(self, vis_processor, text_processor, vis_root, ann_paths): + super().__init__(vis_processor, text_processor, vis_root, ann_paths) + self.instruction_pool =[ + "[vqa] {}", + "[vqa] Based on the image, respond to this question with a short answer: {}" + ] + + def __getitem__(self, index): + ann = self.annotation[index] + + image_path = os.path.join(self.vis_root, ann["image"]) + image = Image.open(image_path).convert("RGB") + + image = self.vis_processor(image) + question = self.text_processor(ann["question"]) + + instruction = random.choice(self.instruction_pool).format(question) + instruction = " {} ".format(instruction) + + answers = self.text_processor(ann["answer"]) + + return { + "image": image, + "instruction_input": instruction, + "answer": answers, + } + diff --git a/minigpt4/datasets/datasets/laion_dataset.py b/minigpt4/datasets/datasets/laion_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..6f3ce873a44bcc675a8b5b50d2aff0b8c542ac26 --- /dev/null +++ b/minigpt4/datasets/datasets/laion_dataset.py @@ -0,0 +1,31 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import webdataset as wds
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+
+
+class LaionDataset(BaseDataset):
+    def __init__(self, vis_processor, text_processor, location):
+        super().__init__(vis_processor=vis_processor, text_processor=text_processor)
+
+        self.inner_dataset = wds.DataPipeline(
+            wds.ResampledShards(location),
+            wds.tarfile_to_samples(handler=wds.warn_and_continue),
+            wds.shuffle(1000, handler=wds.warn_and_continue),
+            wds.decode("pilrgb", handler=wds.warn_and_continue),
+            wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
+            wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
+            wds.map(self.to_dict, handler=wds.warn_and_continue),
+        )
+
+    def to_dict(self, sample):
+        return {
+            "image": sample[0],
+            "answer": self.text_processor(sample[1]["caption"]),
+        }
+
diff --git a/minigpt4/datasets/datasets/llava_dataset.py b/minigpt4/datasets/datasets/llava_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..e12d51be516d1bfe5557a67acbb27e5a47657349
--- /dev/null
+++ b/minigpt4/datasets/datasets/llava_dataset.py
@@ -0,0 +1,149 @@
+import os
+import json
+import pickle
+import random
+import time
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+class LlavaDetailDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        with open(ann_path, 'r') as f:
+            self.ann = json.load(f)
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, index):
+        info = self.ann[index]
+
+        image_file = 'COCO_train2014_{}.jpg'.format(info['id'])
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+
+        answer = info['conversations'][1]['value']
+        # strip the LLaVA image placeholder token from the instruction
+        instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\n', '').strip()
+
+        instruction = ' {} '.format(self.text_processor(instruction))
+
+        return {
+            "image": image,
+            "instruction_input": instruction,
+            "answer": answer,
+            "image_id": info['id'],
+        }
+
+class LlavaReasonDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images (e.g.
coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        with open(ann_path, 'r') as f:
+            self.ann = json.load(f)
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, index):
+        info = self.ann[index]
+
+        image_file = 'COCO_train2014_{}.jpg'.format(info['id'])
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+
+        answer = info['conversations'][1]['value']
+        # strip the LLaVA image placeholder token from the instruction
+        instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\n', '').strip()
+
+        instruction = ' {} '.format(self.text_processor(instruction))
+
+        return {
+            "image": image,
+            "instruction_input": instruction,
+            "answer": answer,
+            "image_id": info['id'],
+        }
+
+
+
+
+class LlavaConversationDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_root (string): directory to store the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        self.ann = []
+
+        with open(ann_path, 'r') as f:
+            self.ann = json.load(f)
+
+        self.connect_sym = "!@#"
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, index):
+        info = self.ann[index]
+
+        image_file = 'COCO_train2014_{}.jpg'.format(info['id'])
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+
+        # strip the LLaVA image placeholder token from the first human turn
+        first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\n', '').strip()
+        first_instruction = ' {} '.format(first_instruction)
+
+        questions = [first_instruction]
+        answers = []
+
+        for i, item in enumerate(info["conversations"][1:]):
+            if i % 2 == 0:  # assistant
+                assistant_answer = item["value"]
+                answers.append(assistant_answer)
+            else:
+                human_instruction = item["value"] + " "
+                questions.append(human_instruction)
+
+        questions = self.connect_sym.join(questions)
+        answers = self.connect_sym.join(answers)
+
+
+        return {
+            "image": image,
+            "conv_q": questions,
+            'conv_a': answers,
+            "image_id": info['id'],
+            "connect_sym": self.connect_sym
+        }
\ No newline at end of file
diff --git a/minigpt4/datasets/datasets/multitask_conversation.py b/minigpt4/datasets/datasets/multitask_conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b13e522261c7993b2336ea72c0191a56f9ff315
--- /dev/null
+++ b/minigpt4/datasets/datasets/multitask_conversation.py
@@ -0,0 +1,75 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+
+
+
+class MultiTaskConversationDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images (e.g.
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, index):
+        info = self.ann[index]
+
+        image_file = 'COCO_train2014_{}.jpg'.format(info['id'])
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+
+        first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\n', '').strip()
+        first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)
+
+        questions = [first_instruction]
+        answers = []
+
+        for i, item in enumerate(info["conversations"][1:]):
+            if i % 2 == 0:  # assistant turn
+                assistant_answer = item["value"]
+                answers.append(assistant_answer)
+            else:  # human turn
+                human_instruction = item["value"] + " "
+                questions.append(human_instruction)
+
+        questions = self.connect_sym.join(questions)
+        answers = self.connect_sym.join(answers)
+
+        return {
+            "image": image,
+            "conv_q": questions,
+            'conv_a': answers,
+            "image_id": info['id'],
+            "connect_sym": self.connect_sym
+        }
\ No newline at end of file
diff --git a/minigpt4/datasets/datasets/multitask_conversation.py b/minigpt4/datasets/datasets/multitask_conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b13e522261c7993b2336ea72c0191a56f9ff315
--- /dev/null
+++ b/minigpt4/datasets/datasets/multitask_conversation.py
@@ -0,0 +1,75 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+
+class MultiTaskConversationDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images (e.g. coco/images/)
+        ann_path (string): path to the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        with open(ann_path, 'r') as f:
+            self.ann = json.load(f)
+
+        self.connect_sym = "!@#"
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, index):
+        info = self.ann[index]
+
+        image_file = 'COCO_train2014_{}.jpg'.format(info['id'])
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+
+        first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\n', '').strip()
+        first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)
+
+        questions = [first_instruction]
+        answers = []
+
+        for i, item in enumerate(info["conversations"][1:]):
+            if i % 2 == 0:  # assistant turn
+                assistant_answer = item["value"]
+                answers.append(assistant_answer)
+            else:  # human turn
+                human_instruction = item["value"] + " "
+                questions.append(human_instruction)
+
+        questions = self.connect_sym.join(questions)
+        answers = self.connect_sym.join(answers)
+
+        return {
+            "image": image,
+            "conv_q": questions,
+            'conv_a': answers,
+            "image_id": info['id'],
+            "connect_sym": self.connect_sym
+        }
\ No newline at end of file
diff --git a/minigpt4/datasets/datasets/ocrvqa_dataset.py b/minigpt4/datasets/datasets/ocrvqa_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..25169fc0827cb9125b60ce23b3adbbaf4a610020
--- /dev/null
+++ b/minigpt4/datasets/datasets/ocrvqa_dataset.py
@@ -0,0 +1,82 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+
+class OCRVQADataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images
+        ann_path (string): path to the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+        self.data = self.create_data(ann_path)
+
+        self.instruction_pool = [
+            "[vqa] {}",
+            "[vqa] Based on the image, respond to this question with a short answer: {}"
+        ]
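+        # With the pool above, a sample question such as
+        # "What is the title of this book?" is wrapped (whitespace included) as:
+        #   "<Img><ImageHere></Img> [vqa] What is the title of this book? "
+        # where <ImageHere> marks the slot later replaced by image embeddings.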
+
+    def create_data(self, ann_path):
+        processed_data = []
+        with open(ann_path, 'r') as f:
+            data = json.load(f)
+        for k in data.keys():
+            if data[k]['split'] != 1: continue  # 1 for training, 2 for validation, 3 for test
+            ext = os.path.splitext(data[k]['imageURL'])[1]
+            imageFile = k + ext
+            assert len(data[k]['questions']) == len(data[k]['answers'])
+            for q, a in zip(data[k]['questions'], data[k]['answers']):
+                processed_data.append(
+                    {'question': q,
+                     'answer': a,
+                     'image_path': imageFile,
+                     'image_id': k,
+                     'title': data[k]['title'],
+                     'genre': data[k]['genre'],
+                     }
+                )
+        return processed_data
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, index):
+        while True:
+            try:
+                sample = self.data[index]
+                image_path = os.path.join(self.vis_root, sample['image_path'])
+                image = Image.open(image_path).convert("RGB")
+                image = self.vis_processor(image)
+                question = self.text_processor(sample["question"])
+                answer = self.text_processor(sample["answer"])
+                instruction = random.choice(self.instruction_pool).format(question)
+                instruction = "<Img><ImageHere></Img> {} ".format(instruction)
+                return {
+                    "image": image,
+                    "instruction_input": instruction,
+                    "answer": answer,
+                    "image_id": sample['image_id']
+                }
+            except FileNotFoundError:
+                print(f'File {image_path} not found. Skipping to the next sample.')
+                index = (index + 1) % len(self.data)  # wrap around so the index stays in range
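+
+# Note: skipping a missing image by advancing the index means two indices can
+# resolve to the same sample; a pragmatic trade-off that keeps DataLoader
+# workers alive when the image dump is incomplete.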
diff --git a/minigpt4/datasets/datasets/text_caps.py b/minigpt4/datasets/datasets/text_caps.py
new file mode 100644
index 0000000000000000000000000000000000000000..47a87f17ef4f289ee93f0fc243b68794034c0512
--- /dev/null
+++ b/minigpt4/datasets/datasets/text_caps.py
@@ -0,0 +1,77 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+
+class TextCapDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_path):
+        """
+        vis_root (string): Root directory of images
+        ann_path (string): path to the annotation file
+        """
+        self.vis_root = vis_root
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        self.instruction_pool = [
+            'Briefly describe this image.',
+            'Provide a concise depiction of this image.',
+            'Present a short description of this image.',
+            'Summarize this image in a few words.',
+            'A short image caption:',
+            'A short image description:',
+            'A photo of ',
+            'An image that shows ',
+            'Write a short description for the image.',
+            'Write a description for the photo.',
+            'Provide a description of what is presented in the photo.',
+            'Briefly describe the content of the image.',
+            'Can you briefly explain what you see in the image?',
+            'Could you use a few words to describe what you perceive in the photo?',
+            'Please provide a short depiction of the picture.',
+            'Using language, provide a short account of the image.',
+            'Use a few words to illustrate what is happening in the picture.',
+        ]
+
+        with open(ann_path, 'r') as f:
+            self.ann = json.load(f)
+
+    def __len__(self):
+        return len(self.ann["data"])
+
+    def __getitem__(self, index):
+        info = self.ann["data"][index]
+
+        image_file = '{}.jpg'.format(info['image_id'])
+
+        image_path = os.path.join(self.vis_root, image_file)
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+
+        caption = info["caption_str"]
+        caption = self.text_processor(caption)
+        instruction = "<Img><ImageHere></Img> [caption] {} ".format(random.choice(self.instruction_pool))
+        return {
+            "image": image,
+            "instruction_input": instruction,
+            "answer": caption,
+        }
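+
+# The "[caption]" tag mirrors the task identifiers used by the sibling datasets
+# ("[vqa]", "[refer]"); it steers the model toward caption-style supervision.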
diff --git a/minigpt4/datasets/datasets/unnatural_instruction.py b/minigpt4/datasets/datasets/unnatural_instruction.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fcf9aca37699b4fa565df7e2956a46726f26d00
--- /dev/null
+++ b/minigpt4/datasets/datasets/unnatural_instruction.py
@@ -0,0 +1,46 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+import skimage.io as io
+import matplotlib.pyplot as plt
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from torch.utils.data import Dataset
+import webdataset as wds
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
+
+
+class UnnaturalDataset(Dataset):
+    def __init__(self, text_processor, ann_path):
+        """
+        ann_path (string): path to the annotation file (text-only; no images are involved)
+        """
+        self.text_processor = text_processor
+
+        with open(ann_path, 'r') as f:
+            self.ann = json.load(f)
+
+    def __len__(self):
+        return len(self.ann)
+
+    def __getitem__(self, index):
+        info = self.ann[index]["instances"][0]
+        instruction = info["instruction_with_input"]
+        constraints = info["constraints"]
+        answer = info["output"]
+        if constraints is not None:
+            instruction = instruction + " " + constraints
+
+        return {
+            "instruction_input": self.text_processor(instruction),
+            "answer": self.text_processor(answer),
+        }
diff --git a/minigpt4/datasets/datasets/vg_dataset.py b/minigpt4/datasets/datasets/vg_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..16823c0aab76a8b8b3ea67a557fc09ab63aec4f8
--- /dev/null
+++ b/minigpt4/datasets/datasets/vg_dataset.py
@@ -0,0 +1,90 @@
+import os
+import json
+import pickle
+import random
+import time
+import itertools
+
+import numpy as np
+from PIL import Image
+from torch.utils.data import Dataset
+from visual_genome import local
+
+
+class ReferVisualGenomeDataset(Dataset):
+    def __init__(self, vis_processor, text_processor, data_dir):
+        """
+        data_dir (string): Root directory of the Visual Genome images and region annotations
+        """
+        self.data_dir = data_dir
+
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+
+        all_regions = local.get_all_region_descriptions(self.data_dir)
+        all_regions = [region for regions in all_regions for region in regions]
+
+        # follow the OFA practice: only regions smaller than 16384 pixels are used for refer
+        self.regions = [region for region in all_regions if region.width * region.height < 16384]
+
+        self.instruction_pool = [
+            "[refer] {}",
+            "[refer] give me the location of {}",
+            "[refer] where is {} ?",
+            "[refer] from this image, tell me the location of {}",
+            "[refer] the location of {} is",
+            "[refer] could you tell me the location for {} ?",
+            "[refer] where can I locate the {} ?",
+        ]
+
+    def __len__(self):
+        return len(self.regions)
+
+    def preprocess(self, index):
+        region = self.regions[index]
+        image_file = region.image.url.split('/')[-2:]
+        image_path = os.path.join(self.data_dir, *image_file)
+        image = Image.open(image_path).convert("RGB")
+        image_orig_size = image.size
+        image = self.vis_processor(image)
+        image_new_size = [100, 100]
+
+        sample_sentence = region.phrase
+        refer_sentence = self.text_processor(sample_sentence)
+
+        bbox = [region.x, region.y, region.width, region.height]
+        # convert (x, y, w, h) in original pixels to (x1, y1, x2, y2) on the 100x100 grid
+        bbox = [
+            bbox[0] / image_orig_size[0] * image_new_size[0],
+            bbox[1] / image_orig_size[1] * image_new_size[1],
+            (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],
+            (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]
+        ]
+        bbox = [int(x) for x in bbox]
+        bbox = "{{<{}><{}><{}><{}>}}".format(*bbox)
+        return {
+            "image": image,
+            "refer_sentence": refer_sentence,
+            "bbox": bbox,
+            "image_id": region.image.id,
+        }
+
+    def __getitem__(self, index):
+        data = self.preprocess(index)
+        instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])
+
+        instruction = "<Img><ImageHere></Img> {} ".format(instruction)
+
+        return {
+            "image": data['image'],
+            "instruction_input": instruction,
+            "answer": data['bbox'],
+            "image_id": data['image_id'],
+        }
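+
+# Boxes are serialized on a 100x100 grid as "{<x1><y1><x2><y2>}": for example, a
+# 30x40 region at (10, 20) in a 200x200 image becomes "{<5><10><20><30>}", the
+# textual box format the model is expected to emit for [refer] tasks.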
diff --git a/minigpt4/datasets/datasets/vqa_datasets.py b/minigpt4/datasets/datasets/vqa_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..45a8bcdd69627591e78dd1f4bffa49a789401ba1
--- /dev/null
+++ b/minigpt4/datasets/datasets/vqa_datasets.py
@@ -0,0 +1,149 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import torch
+from PIL import Image
+import os
+
+from minigpt4.datasets.datasets.base_dataset import BaseDataset
+
+
+class VQADataset(BaseDataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+
+class VQAEvalDataset(BaseDataset):
+    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
+        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
+
+
+class OKVQAEvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        data = self.loaded_data[idx]
+        img_id = data['image_id']
+        question = data['question']
+        question_id = data['question_id']
+        img_file = '{:0>12}.jpg'.format(img_id)
+        image_path = os.path.join(self.root_path, img_file)
+        image = Image.open(image_path).convert('RGB')
+        image = self.vis_processor(image)
+        question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
+        return image, question, question_id, img_id
+
+
+class VizWizEvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        data = self.loaded_data[idx]
+        img_id = data['image']
+        question = data['question']
+        answers = data['answers']
+        answers = '_'.join([answer['answer'] for answer in answers])
+        image_path = os.path.join(self.root_path, img_id)
+        image = Image.open(image_path).convert('RGB')
+        image = self.vis_processor(image)
+        question = f"[vqa] The question is '{question}' Based on the image, answer the question with a single word or phrase, and reply 'unanswerable' when the provided information is insufficient"
+        return image, question, answers
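+
+# VizWiz provides ten crowd answers per question; joining them with '_' keeps
+# them in a single string through default collation, so evaluation code is
+# expected to split on '_' again.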
+
+
+class IconQAEvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        data = self.loaded_data[str(idx)]
+
+        image_id = data["split"] + "/" + data["ques_type"] + "/" + str(idx)
+        question = data['question']
+        image_path = os.path.join(self.root_path, image_id, 'image.png')
+        image = Image.open(image_path).convert('RGB')
+        image = self.vis_processor(image).half().cuda()
+        candidates = '_'.join(data.get('choices', ""))
+        answer = data['answer']
+        question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
+        return image, question, candidates, answer
+
+
+class GQAEvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        ann = self.loaded_data[idx]
+        image_id = ann["image"]
+        image_path = os.path.join(self.root_path, f"{image_id}")
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+        question = ann["question"]
+        question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
+        labels = ann["answer"]
+
+        return image, question, labels
+
+
+class HMEvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        ann = self.loaded_data[idx]
+        image_id = ann["img"]
+        image_path = os.path.join(self.root_path, f"{image_id}")
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+        question = ann["text"]
+        question = f"This is an image with '{question}' written on it. Is this image hateful? Answer yes or no. Answer:"
+        labels = ann["label"]
+
+        return image, question, labels
+
+
+class VSREvalData(torch.utils.data.Dataset):
+    def __init__(self, loaded_data, vis_processor, root_path):
+        self.loaded_data = loaded_data
+        self.root_path = root_path
+        self.vis_processor = vis_processor
+
+    def __len__(self):
+        return len(self.loaded_data)
+
+    def __getitem__(self, idx):
+        ann = self.loaded_data[idx]
+        image_path = os.path.join(self.root_path, ann["image"])
+        image = Image.open(image_path).convert("RGB")
+        image = self.vis_processor(image)
+        question = ann["caption"]
+        question = f'[vqa] Based on the image, is this statement true or false? {question}'
+        labels = 'true' if ann["label"] == 1 else 'false'
+
+        return image, question, labels
\ No newline at end of file
diff --git a/minigpt4/models/Qformer.py b/minigpt4/models/Qformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e71b12375e10511858a9c505dc795181e6ce5603
--- /dev/null
+++ b/minigpt4/models/Qformer.py
@@ -0,0 +1,1216 @@
+"""
+ * Copyright (c) 2023, salesforce.com, inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +""" + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple, Dict, Any + +import torch +from torch import Tensor, device, dtype, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +import torch.nn.functional as F + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id + ) + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size + ) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + ) + self.position_embedding_type = getattr( + config, "position_embedding_type", "absolute" + ) + + self.config = config + + def forward( + self, + input_ids=None, + position_ids=None, + query_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + seq_length = input_ids.size()[1] + else: + seq_length = 0 + + if position_ids is None: + position_ids = self.position_ids[ + :, past_key_values_length : seq_length + past_key_values_length + ].clone() + + if input_ids is not None: + embeddings = self.word_embeddings(input_ids) + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings = embeddings + position_embeddings + + if query_embeds is not None: + embeddings = torch.cat((query_embeds, embeddings), dim=1) + else: + embeddings = query_embeds + + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + 
self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr( + config, "position_embedding_type", "absolute" + ) + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, self.attention_head_size + ) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + mixed_query_layer = self.query(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
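+        # Expected shapes at this point (B=batch, H=heads, L=query length,
+        # S=key length, d=head dim): query_layer is (B, H, L, d) and key_layer
+        # is (B, H, S, d), so attention_scores below comes out as (B, H, L, S).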
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, device=hidden_states.device + ).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1 + ) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype + ) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + relative_position_scores_key = torch.einsum( + "bhrd,lrd->bhlr", key_layer, positional_embedding + ) + attention_scores = ( + attention_scores + + relative_position_scores_query + + relative_position_scores_key + ) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = ( + (context_layer, attention_probs) if output_attentions else (context_layer,) + ) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = ( + self.self.attention_head_size * self.self.num_attention_heads + ) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[ + 1: + ] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = 
self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if ( + self.config.add_cross_attention + and layer_num % self.config.cross_attention_freq == 0 + ): + self.crossattention = BertAttention( + config, is_cross_attention=self.config.add_cross_attention + ) + self.has_cross_attention = True + else: + self.has_cross_attention = False + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + self.intermediate_query = BertIntermediate(config) + self.output_query = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + query_length=0, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None + ) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:-1] + + present_key_value = self_attention_outputs[-1] + + if query_length > 0: + query_attention_output = attention_output[:, :query_length, :] + + if self.has_cross_attention: + assert ( + encoder_hidden_states is not None + ), "encoder_hidden_states must be given for cross-attention layers" + cross_attention_outputs = self.crossattention( + query_attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + query_attention_output = cross_attention_outputs[0] + outputs = ( + outputs + cross_attention_outputs[1:-1] + ) # add cross attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk_query, + self.chunk_size_feed_forward, + self.seq_len_dim, + query_attention_output, + ) + if attention_output.shape[1] > query_length: + layer_output_text = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output[:, query_length:, :], + ) + layer_output = torch.cat([layer_output, layer_output_text], dim=1) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + def feed_forward_chunk_query(self, attention_output): + intermediate_output = self.intermediate_query(attention_output) + layer_output = self.output_query(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)] + 
) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + query_length=0, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + () if output_attentions and self.config.add_cross_attention else None + ) + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module( + *inputs, past_key_value, output_attentions, query_length + ) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + query_length, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(first_token_tensor)
+        pooled_output = self.activation(pooled_output)
+        return pooled_output
+
+
+class BertPredictionHeadTransform(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        if isinstance(config.hidden_act, str):
+            self.transform_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.transform_act_fn = config.hidden_act
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+class BertLMPredictionHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.transform = BertPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+class BertOnlyMLMHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.predictions = BertLMPredictionHead(config)
+
+    def forward(self, sequence_output):
+        prediction_scores = self.predictions(sequence_output)
+        return prediction_scores
+
+
+class BertPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = BertConfig
+    base_model_prefix = "bert"
+    _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+
+class BertModel(BertPreTrainedModel):
+    """
+    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
+    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be
+    initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`; to be used in a Seq2Seq
+    model, it needs to be initialized with both the :obj:`is_decoder`
+    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
+    input to the forward pass.
+ """ + + def __init__(self, config, add_pooling_layer=False): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: Tensor, + input_shape: Tuple[int], + device: device, + is_decoder: bool, + has_query: bool = False, + ) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = ( + seq_ids[None, None, :].repeat(batch_size, seq_length, 1) + <= seq_ids[None, :, None] + ) + + # add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + if has_query: # UniLM style attention mask + causal_mask = torch.cat( + [ + torch.zeros( + (batch_size, prefix_seq_len, seq_length), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=1, + ) + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, causal_mask.shape[1], prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + extended_attention_mask = ( + causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + ) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. 
+ # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype + ) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # use_cache = use_cache if use_cache is not None else self.config.use_cache + + if input_ids is None: + assert ( + query_embeds is not None + ), "You have to specify query_embeds when input_ids is None" + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] - self.config.query_length + if past_key_values is not None + else 0 + ) + + query_length = query_embeds.shape[1] if query_embeds is not None else 0 + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + query_embeds=query_embeds, + past_key_values_length=past_key_values_length, + ) + + input_shape = embedding_output.size()[:-1] + batch_size, seq_length = input_shape + device = embedding_output.device + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), device=device + ) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
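+        # In decoder mode a causal mask is combined with the padding mask; when
+        # query embeddings are prefixed (has_query=True) it is extended
+        # UniLM-style: every position may attend to the query prefix, while the
+        # query rows themselves remain masked from the text tokens.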
+ if is_decoder: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, + input_ids.shape, + device, + is_decoder, + has_query=(query_embeds is not None), + ) + else: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder + ) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ + 0 + ].size() + else: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + query_length=query_length, + ) + sequence_output = encoder_outputs[0] + pooled_output = ( + self.pooler(sequence_output) if self.pooler is not None else None + ) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=True, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False,
+        is_decoder=True,
+        reduction="mean",
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
+            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + if labels is not None: + use_cache = False + if past_key_values is not None: + query_embeds = None + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + query_embeds=query_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + ) + + sequence_output = outputs[0] + if query_embeds is not None: + sequence_output = outputs[0][:, query_embeds.shape[1] :, :] + + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1), + ) + if reduction == "none": + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + query_mask = input_ids.new_ones(query_embeds.shape[:-1]) + attention_mask = torch.cat([query_mask, attention_mask], dim=-1) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "query_embeds": query_embeds, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += ( + tuple( + past_state.index_select(0, beam_idx) for past_state in layer_past + ), + ) + return reordered_past + + +class BertForMaskedLM(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, 
config):
+        super().__init__(config)
+
+        self.bert = BertModel(config, add_pooling_layer=False)
+        self.cls = BertOnlyMLMHead(config)
+
+        self.init_weights()
+
+    def get_output_embeddings(self):
+        return self.cls.predictions.decoder
+
+    def set_output_embeddings(self, new_embeddings):
+        self.cls.predictions.decoder = new_embeddings
+
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        position_ids=None,
+        head_mask=None,
+        query_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        labels=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+        return_logits=False,
+        is_decoder=False,
+    ):
+        r"""
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
+            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
+            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        """
+
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            query_embeds=query_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            is_decoder=is_decoder,
+        )
+
+        # score the full sequence by default; when query embeddings are
+        # prepended, drop the query prefix so only text positions are scored
+        sequence_output = outputs[0]
+        if query_embeds is not None:
+            sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
+        prediction_scores = self.cls(sequence_output)
+
+        if return_logits:
+            return prediction_scores
+
+        masked_lm_loss = None
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()  # -100 index = padding token
+            masked_lm_loss = loss_fct(
+                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
+            )
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return (
+                ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+            )
+
+        return MaskedLMOutput(
+            loss=masked_lm_loss,
+            logits=prediction_scores,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
diff --git a/minigpt4/models/__init__.py b/minigpt4/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc01b56181aa81554efbe9df10ab3678a1c7bb86
--- /dev/null
+++ b/minigpt4/models/__init__.py
@@ -0,0 +1,202 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import logging
+import torch
+from omegaconf import OmegaConf
+
+from minigpt4.common.registry import registry
+from minigpt4.models.base_model import BaseModel
+from minigpt4.models.minigpt_base import MiniGPTBase
+from minigpt4.models.minigpt4 import MiniGPT4
+from minigpt4.models.minigpt_v2 import MiniGPTv2
+from minigpt4.processors.base_processor import BaseProcessor
+
+
+__all__ = [
+    "load_model",
+    "BaseModel",
+    "MiniGPTBase",
+    "MiniGPT4",
+    "MiniGPTv2"
+]
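+
+# Typical usage -- the architecture/type names below are illustrative and must
+# match what is registered in your configs:
+#
+#   from minigpt4.models import load_model
+#   model = load_model("minigpt4", "pretrain_vicuna0", is_eval=True, device="cpu")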
+        is_eval (bool): whether the model is in eval mode. Default: False.
+        device (str): device to use. Default: "cpu".
+        checkpoint (str): path to checkpoint. Default: None.
+            Note that the checkpoint is expected to have the same state_dict keys as the model.
+
+    Returns:
+        model (torch.nn.Module): model.
+    """
+
+    model = registry.get_model_class(name).from_pretrained(model_type=model_type)
+
+    if checkpoint is not None:
+        model.load_checkpoint(checkpoint)
+
+    if is_eval:
+        model.eval()
+
+    if device == "cpu":
+        model = model.float()
+
+    return model.to(device)
+
+
+def load_preprocess(config):
+    """
+    Load preprocessor configs and construct preprocessors.
+
+    If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing.
+
+    Args:
+        config (dict): preprocessor configs.
+
+    Returns:
+        vis_processors (dict): preprocessors for visual inputs.
+        txt_processors (dict): preprocessors for text inputs.
+
+        Key is "train" or "eval" for processors used in training and evaluation respectively.
+    """
+
+    def _build_proc_from_cfg(cfg):
+        return (
+            registry.get_processor_class(cfg.name).from_config(cfg)
+            if cfg is not None
+            else BaseProcessor()
+        )
+
+    vis_processors = dict()
+    txt_processors = dict()
+
+    vis_proc_cfg = config.get("vis_processor")
+    txt_proc_cfg = config.get("text_processor")
+
+    if vis_proc_cfg is not None:
+        vis_train_cfg = vis_proc_cfg.get("train")
+        vis_eval_cfg = vis_proc_cfg.get("eval")
+    else:
+        vis_train_cfg = None
+        vis_eval_cfg = None
+
+    vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg)
+    vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg)
+
+    if txt_proc_cfg is not None:
+        txt_train_cfg = txt_proc_cfg.get("train")
+        txt_eval_cfg = txt_proc_cfg.get("eval")
+    else:
+        txt_train_cfg = None
+        txt_eval_cfg = None
+
+    txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg)
+    txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg)
+
+    return vis_processors, txt_processors
+
+
+def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"):
+    """
+    Load model and its related preprocessors.
+
+    List all available models and types in registry:
+    >>> from minigpt4.models import model_zoo
+    >>> print(model_zoo)
+
+    Args:
+        name (str): name of the model.
+        model_type (str): type of the model.
+        is_eval (bool): whether the model is in eval mode. Default: False.
+        device (str): device to use. Default: "cpu".
+
+    Returns:
+        model (torch.nn.Module): model.
+        vis_processors (dict): preprocessors for visual inputs.
+        txt_processors (dict): preprocessors for text inputs.
+    """
+    model_cls = registry.get_model_class(name)
+
+    # load model
+    model = model_cls.from_pretrained(model_type=model_type)
+
+    if is_eval:
+        model.eval()
+
+    # load preprocess
+    cfg = OmegaConf.load(model_cls.default_config_path(model_type))
+    if cfg is not None:
+        preprocess_cfg = cfg.preprocess
+
+        vis_processors, txt_processors = load_preprocess(preprocess_cfg)
+    else:
+        vis_processors, txt_processors = None, None
+        logging.info(
+            f"""No default preprocess for model {name} ({model_type}).
+                This can happen if the model is not finetuned on downstream datasets,
+                or it is not intended for direct use without finetuning.
+            """
+        )
+
+    if device == "cpu" or device == torch.device("cpu"):
+        model = model.float()
+
+    return model.to(device), vis_processors, txt_processors
+
+
+class ModelZoo:
+    """
+    A utility class to create string representation of available model architectures and types.
+ + >>> from minigpt4.models import model_zoo + >>> # list all available models + >>> print(model_zoo) + >>> # show total number of models + >>> print(len(model_zoo)) + """ + + def __init__(self) -> None: + self.model_zoo = { + k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys()) + for k, v in registry.mapping["model_name_mapping"].items() + } + + def __str__(self) -> str: + return ( + "=" * 50 + + "\n" + + f"{'Architectures':<30} {'Types'}\n" + + "=" * 50 + + "\n" + + "\n".join( + [ + f"{name:<30} {', '.join(types)}" + for name, types in self.model_zoo.items() + ] + ) + ) + + def __iter__(self): + return iter(self.model_zoo.items()) + + def __len__(self): + return sum([len(v) for v in self.model_zoo.values()]) + + +model_zoo = ModelZoo() diff --git a/minigpt4/models/__pycache__/Qformer.cpython-39.pyc b/minigpt4/models/__pycache__/Qformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e303e2b410ca399958549dbfed6268b95bfe3cc9 Binary files /dev/null and b/minigpt4/models/__pycache__/Qformer.cpython-39.pyc differ diff --git a/minigpt4/models/__pycache__/__init__.cpython-39.pyc b/minigpt4/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6ba25714afd58935d79b63c5b3353f6aa9e7866 Binary files /dev/null and b/minigpt4/models/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/models/__pycache__/base_model.cpython-39.pyc b/minigpt4/models/__pycache__/base_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47e1f7fa408778391b107a6bb2f1cf01ccf8a3ac Binary files /dev/null and b/minigpt4/models/__pycache__/base_model.cpython-39.pyc differ diff --git a/minigpt4/models/__pycache__/eva_vit.cpython-39.pyc b/minigpt4/models/__pycache__/eva_vit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..309634afba7f4e75d3262883e2fdce704abb8f76 Binary files /dev/null and b/minigpt4/models/__pycache__/eva_vit.cpython-39.pyc differ diff --git a/minigpt4/models/__pycache__/minigpt4.cpython-39.pyc b/minigpt4/models/__pycache__/minigpt4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b0820356b3c8b0f350efa251f09e75d56d14a0f Binary files /dev/null and b/minigpt4/models/__pycache__/minigpt4.cpython-39.pyc differ diff --git a/minigpt4/models/__pycache__/minigpt_base.cpython-39.pyc b/minigpt4/models/__pycache__/minigpt_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df568169d4a4d19666b7821aa430100897bbb006 Binary files /dev/null and b/minigpt4/models/__pycache__/minigpt_base.cpython-39.pyc differ diff --git a/minigpt4/models/__pycache__/minigpt_v2.cpython-39.pyc b/minigpt4/models/__pycache__/minigpt_v2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1628c9d97d56f75b330d2ac4ed482111f7b1d6b6 Binary files /dev/null and b/minigpt4/models/__pycache__/minigpt_v2.cpython-39.pyc differ diff --git a/minigpt4/models/base_model.py b/minigpt4/models/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..d5c9876c33526dccdb5e16499c0b0f84c734379e --- /dev/null +++ b/minigpt4/models/base_model.py @@ -0,0 +1,310 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+    SPDX-License-Identifier: BSD-3-Clause
+    For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import os
+import logging
+import contextlib
+
+from omegaconf import OmegaConf
+import numpy as np
+import torch
+import torch.nn as nn
+from transformers import AutoTokenizer
+from peft import (
+    LoraConfig,
+    get_peft_model,
+    prepare_model_for_int8_training,
+)
+
+from minigpt4.common.dist_utils import download_cached_file
+from minigpt4.common.utils import get_abs_path, is_url
+from minigpt4.models.eva_vit import create_eva_vit_g
+from modified import PhiForCausalLM
+# from transformers import PhiForCausalLM
+
+
+class BaseModel(nn.Module):
+    """Base class for models."""
+
+    def __init__(self):
+        super().__init__()
+
+    @property
+    def device(self):
+        return list(self.parameters())[-1].device
+
+    def load_checkpoint(self, url_or_filename):
+        """
+        Load from a finetuned checkpoint.
+
+        This should expect no mismatch in the model keys and the checkpoint keys.
+        """
+
+        if is_url(url_or_filename):
+            cached_file = download_cached_file(
+                url_or_filename, check_hash=False, progress=True
+            )
+            checkpoint = torch.load(cached_file, map_location="cpu")
+        elif os.path.isfile(url_or_filename):
+            checkpoint = torch.load(url_or_filename, map_location="cpu")
+        else:
+            raise RuntimeError("checkpoint url or path is invalid")
+
+        if "model" in checkpoint.keys():
+            state_dict = checkpoint["model"]
+        else:
+            state_dict = checkpoint
+
+        msg = self.load_state_dict(state_dict, strict=False)
+
+        logging.info("Missing keys {}".format(msg.missing_keys))
+        logging.info("load checkpoint from %s" % url_or_filename)
+
+        return msg
+
+    @classmethod
+    def from_pretrained(cls, model_type):
+        """
+        Build a pretrained model from default configuration file, specified by model_type.
+
+        Args:
+            - model_type (str): model type, specifying architecture and checkpoints.
+
+        Returns:
+            - model (nn.Module): pretrained or finetuned model, depending on the configuration.
+        """
+        model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
+        model = cls.from_config(model_cfg)
+
+        return model
+
+    @classmethod
+    def default_config_path(cls, model_type):
+        assert (
+            model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
+        ), "Unknown model type {}".format(model_type)
+        return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
+
+    def load_checkpoint_from_config(self, cfg, **kwargs):
+        """
+        Load checkpoint as specified in the config file.
+
+        If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
+        When loading the pretrained model, each task-specific architecture may define its
+        own load_from_pretrained() method.
+        """
+        load_finetuned = cfg.get("load_finetuned", True)
+        if load_finetuned:
+            finetune_path = cfg.get("finetuned", None)
+            assert (
+                finetune_path is not None
+            ), "Found load_finetuned is True, but finetune_path is None."
+            self.load_checkpoint(url_or_filename=finetune_path)
+        else:
+            # load pre-trained weights
+            pretrain_path = cfg.get("pretrained", None)
+            assert (
+                pretrain_path is not None
+            ), "Found load_finetuned is False, but pretrain_path is None."
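+            # load_from_pretrained (defined below) loads checkpoint["model"]
+            # with strict=False, so keys that only exist on one side are tolerated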
+            self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
+
+    def before_evaluation(self, **kwargs):
+        pass
+
+    def show_n_params(self, return_str=True):
+        tot = 0
+        for p in self.parameters():
+            w = 1
+            for x in p.shape:
+                w *= x
+            tot += w
+        if return_str:
+            if tot >= 1e6:
+                return "{:.1f}M".format(tot / 1e6)
+            else:
+                return "{:.1f}K".format(tot / 1e3)
+        else:
+            return tot
+
+    def maybe_autocast(self, dtype=torch.float16):
+        # if on cpu, don't use autocast
+        # if on gpu, use autocast with dtype if provided, otherwise use torch.float16
+        enable_autocast = self.device != torch.device("cpu")
+
+        if enable_autocast:
+            return torch.cuda.amp.autocast(dtype=dtype)
+        else:
+            return contextlib.nullcontext()
+
+    @classmethod
+    def init_vision_encoder(
+        cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision, freeze
+    ):
+        logging.info('Loading VIT')
+
+        assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
+        if not freeze:
+            precision = "fp32"  # fp16 is not for training
+
+        visual_encoder = create_eva_vit_g(
+            img_size, drop_path_rate, use_grad_checkpoint, precision
+        )
+
+        ln_vision = LayerNorm(visual_encoder.num_features)
+
+        if freeze:
+            for name, param in visual_encoder.named_parameters():
+                param.requires_grad = False
+            visual_encoder = visual_encoder.eval()
+            visual_encoder.train = disabled_train
+            for name, param in ln_vision.named_parameters():
+                param.requires_grad = False
+            ln_vision = ln_vision.eval()
+            ln_vision.train = disabled_train
+            logging.info("freeze vision encoder")
+
+        logging.info('Loading VIT Done')
+        return visual_encoder, ln_vision
+
+    # NOTE: called as an instance method, so `cls` is actually the model instance
+    def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
+                 lora_target_modules=['Wqkv', 'out_proj'], **lora_kargs):
+        logging.info('Loading LLAMA')
+        llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_path, use_fast=False)
+        llama_tokenizer.pad_token = llama_tokenizer.eos_token
+
+        if low_resource:
+            llama_model = PhiForCausalLM.from_pretrained(
+                llama_model_path,
+                torch_dtype=torch.float32,
+                load_in_8bit=True,
+                device_map={'': low_res_device}
+            )
+        else:
+            llama_model = PhiForCausalLM.from_pretrained(
+                llama_model_path,
+                torch_dtype=torch.float32,
+            )
+
+        if lora_r > 0:
+            # llama_model = prepare_model_for_int8_training(llama_model)
+            loraconfig = LoraConfig(
+                r=lora_r,
+                bias="none",
+                task_type="CAUSAL_LM",
+                target_modules=lora_target_modules,
+                **lora_kargs
+            )
+            llama_model = get_peft_model(llama_model, loraconfig)
+
+            llama_model.print_trainable_parameters()
+            for i, layer in enumerate(llama_model.model.model.layers):
+                # layer.register_forward_hook(print_layer_output)
+                # keep the layer norms trainable alongside the LoRA adapters
+                layer.self_attn.q_layernorm.weight.requires_grad = True
+                layer.self_attn.k_layernorm.weight.requires_grad = True
+                layer.post_layernorm.weight.requires_grad = True
+                layer.input_layernorm.weight.requires_grad = True
+
+                # layer.self_attn.q_layernorm.weight.data = layer.self_attn.q_layernorm.weight.data.float()
+                # layer.self_attn.k_layernorm.weight.data = layer.self_attn.k_layernorm.weight.data.float()
+                # layer.post_layernorm.weight.data = layer.post_layernorm.weight.data.float()
+                # layer.input_layernorm.weight.data = layer.input_layernorm.weight.data.float()
+
+                # # apply the same conversion to the bias terms
+                # if layer.self_attn.q_layernorm.bias is not None:
+                #     layer.self_attn.q_layernorm.bias.data = layer.self_attn.q_layernorm.bias.data.float()
+                # if layer.self_attn.k_layernorm.bias is not None:
+                #     layer.self_attn.k_layernorm.bias.data = layer.self_attn.k_layernorm.bias.data.float()
+                # if layer.input_layernorm.bias is not None:
+                #     layer.input_layernorm.bias.data = layer.input_layernorm.bias.data.float()
+
+            # llama_model.model.model.final_layernorm.weight.requires_grad = True
+            # llama_model.model.model.final_layernorm.weight.data = llama_model.model.model.final_layernorm.weight.data.float()
+            # if llama_model.model.model.final_layernorm.bias is not None:
+            #     llama_model.model.model.final_layernorm.bias.data = llama_model.model.model.final_layernorm.bias.float()
+
+        else:
+            for name, param in llama_model.named_parameters():
+                param.requires_grad = False
+
+            # for i, layer in enumerate(llama_model.model.layers):
+            #     # if the layer index is below 5, make that layer's parameters trainable
+            #     if i < 5:
+            #         for param in layer.parameters():
+            #             param.requires_grad = True
+            #         # convert these layers' parameters to FP32
+            #         layer.to(torch.float32)
+            for i, layer in enumerate(llama_model.model.layers):
+                # layer.register_forward_hook(print_layer_output)
+                # keep the layer norms trainable and in FP32 even though the rest of the LLM is frozen
+                layer.self_attn.q_layernorm.weight.requires_grad = True
+                layer.self_attn.k_layernorm.weight.requires_grad = True
+                layer.post_layernorm.weight.requires_grad = True
+                layer.input_layernorm.weight.requires_grad = True
+
+                layer.self_attn.q_layernorm.weight.data = layer.self_attn.q_layernorm.weight.data.float()
+                layer.self_attn.k_layernorm.weight.data = layer.self_attn.k_layernorm.weight.data.float()
+                layer.post_layernorm.weight.data = layer.post_layernorm.weight.data.float()
+                layer.input_layernorm.weight.data = layer.input_layernorm.weight.data.float()
+
+                # apply the same conversion to the bias terms
+                if layer.self_attn.q_layernorm.bias is not None:
+                    layer.self_attn.q_layernorm.bias.data = layer.self_attn.q_layernorm.bias.data.float()
+                if layer.self_attn.k_layernorm.bias is not None:
+                    layer.self_attn.k_layernorm.bias.data = layer.self_attn.k_layernorm.bias.data.float()
+                if layer.input_layernorm.bias is not None:
+                    layer.input_layernorm.bias.data = layer.input_layernorm.bias.data.float()
+
+            llama_model.model.final_layernorm.weight.requires_grad = True
+            llama_model.model.final_layernorm.weight.data = llama_model.model.final_layernorm.weight.data.float()
+            if llama_model.model.final_layernorm.bias is not None:
+                llama_model.model.final_layernorm.bias.data = llama_model.model.final_layernorm.bias.float()
+
+        logging.info('Loading LLAMA Done')
+        return llama_model, llama_tokenizer
+
+    def load_from_pretrained(self, url_or_filename):
+        if is_url(url_or_filename):
+            cached_file = download_cached_file(
+                url_or_filename, check_hash=False, progress=True
+            )
+            checkpoint = torch.load(cached_file, map_location="cpu")
+        elif os.path.isfile(url_or_filename):
+            checkpoint = torch.load(url_or_filename, map_location="cpu")
+        else:
+            raise RuntimeError("checkpoint url or path is invalid")
+
+        state_dict = checkpoint["model"]
+
+        msg = self.load_state_dict(state_dict, strict=False)
+
+        # logging.info("Missing keys {}".format(msg.missing_keys))
+        logging.info("load checkpoint from %s" % url_or_filename)
+
+        return msg
+
+
+def disabled_train(self, mode=True):
+    """Overwrite model.train with this function to make sure train/eval mode
+    does not change anymore."""
+    return self
+
+
+class LayerNorm(nn.LayerNorm):
+    """Subclass torch's LayerNorm to handle fp16."""
+
+    def forward(self, x: torch.Tensor):
+        orig_type = x.dtype
+        ret = super().forward(x.type(torch.float32))
+        return ret.type(orig_type)
diff --git a/minigpt4/models/configuration_phi.py
b/minigpt4/models/configuration_phi.py new file mode 100644 index 0000000000000000000000000000000000000000..27cb3b49799153bce735dd696c4653ba4a0d5271 --- /dev/null +++ b/minigpt4/models/configuration_phi.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import math +from typing import Optional + +from transformers import PretrainedConfig + + +class PhiConfig(PretrainedConfig): + """Phi configuration.""" + + model_type = "phi-msft" + attribute_map = { + "max_position_embeddings": "n_positions", + "hidden_size": "n_embd", + "num_attention_heads": "n_head", + "num_hidden_layers": "n_layer", + } + + def __init__( + self, + vocab_size: int = 50304, + n_positions: int = 2048, + n_embd: int = 1024, + n_layer: int = 20, + n_inner: Optional[int] = None, + n_head: int = 16, + n_head_kv: Optional[int] = None, + rotary_dim: Optional[int] = 32, + activation_function: Optional[str] = "gelu_new", + flash_attn: bool = False, + flash_rotary: bool = False, + fused_dense: bool = False, + attn_pdrop: float = 0.0, + embd_pdrop: float = 0.0, + resid_pdrop: float = 0.0, + layer_norm_epsilon: float = 1e-5, + initializer_range: float = 0.02, + tie_word_embeddings: bool = False, + pad_vocab_size_multiple: int = 64, + **kwargs + ) -> None: + self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple) + self.n_positions = n_positions + self.n_embd = n_embd + self.n_layer = n_layer + self.n_inner = n_inner + self.n_head = n_head + self.n_head_kv = n_head_kv + self.rotary_dim = min(rotary_dim, n_embd // n_head) + self.activation_function = activation_function + self.flash_attn = flash_attn + self.flash_rotary = flash_rotary + self.fused_dense = fused_dense + self.attn_pdrop = attn_pdrop + self.embd_pdrop = embd_pdrop + self.resid_pdrop = resid_pdrop + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + + super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) diff --git a/minigpt4/models/eva_vit.py b/minigpt4/models/eva_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..7fcc63a74049f1faf65c99943ef94f72383ca3f5 --- /dev/null +++ b/minigpt4/models/eva_vit.py @@ -0,0 +1,442 @@ +# Based on EVA, BEIT, timm and DeiT code bases +# https://github.com/baaivision/EVA +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + +from minigpt4.common.dist_utils import download_cached_file + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + **kwargs + } + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
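+    During training, each sample's residual branch is zeroed with probability
+    drop_prob and surviving samples are rescaled by 1 / (1 - drop_prob);
+    at evaluation time the module is the identity.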
+ """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., + proj_drop=0., window_size=None, attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, rel_pos_bias=None): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = 
qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values is not None and init_values > 0: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rel_pos_bias=None): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x, **kwargs): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
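+        # proj is a conv with kernel = stride = patch_size, so it maps
+        # (B, C, H, W) -> (B, embed_dim, H/P, W/P); flatten(2).transpose(1, 2)
+        # then yields the token sequence (B, num_patches, embed_dim)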
+ x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + # trunc_normal_(self.relative_position_bias_table, std=.02) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + use_mean_pooling=True, init_scale=0.001, use_checkpoint=False): + super().__init__() + self.image_size = img_size + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) + else: + self.rel_pos_bias = None + self.use_checkpoint = use_checkpoint + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, 
qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) + for i in range(depth)]) +# self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) +# self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None +# self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) +# if isinstance(self.head, nn.Linear): +# trunc_normal_(self.head.weight, std=.02) + self.apply(self._init_weights) + self.fix_init_weight() +# if isinstance(self.head, nn.Linear): +# self.head.weight.data.mul_(init_scale) +# self.head.bias.data.mul_(init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, rel_pos_bias) + else: + x = blk(x, rel_pos_bias) + return x +# x = self.norm(x) + +# if self.fc_norm is not None: +# t = x[:, 1:, :] +# return self.fc_norm(t.mean(1)) +# else: +# return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) +# x = self.head(x) + return x + + def get_intermediate_layers(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + features = [] + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias) + features.append(x) + + return features + + +def interpolate_pos_embed(model, checkpoint_model): + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'].float() + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and 
dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + + +def convert_weights_to_fp16(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + +# if isinstance(l, (nn.MultiheadAttention, Attention)): +# for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: +# tensor = getattr(l, attr) +# if tensor is not None: +# tensor.data = tensor.data.half() + + model.apply(_convert_weights_to_fp16) + + +def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"): + model = VisionTransformer( + img_size=img_size, + patch_size=14, + use_mean_pooling=False, + embed_dim=1408, + depth=39, + num_heads=1408//88, + mlp_ratio=4.3637, + qkv_bias=True, + drop_path_rate=drop_path_rate, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + use_checkpoint=use_checkpoint, + ) + url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth" + cached_file = download_cached_file( + url, check_hash=False, progress=True + ) + state_dict = torch.load(cached_file, map_location="cpu") + interpolate_pos_embed(model,state_dict) + + incompatible_keys = model.load_state_dict(state_dict, strict=False) +# print(incompatible_keys) + + if precision == "fp16": +# model.to("cuda") + convert_weights_to_fp16(model) + return model \ No newline at end of file diff --git a/minigpt4/models/minigpt4.py b/minigpt4/models/minigpt4.py new file mode 100644 index 0000000000000000000000000000000000000000..31783ebdc6471645167eb71fae40fa1b7df1db63 --- /dev/null +++ b/minigpt4/models/minigpt4.py @@ -0,0 +1,216 @@ +import logging +import random + +import torch +from torch.cuda.amp import autocast as autocast +import torch.nn as nn + +from minigpt4.common.registry import registry +from minigpt4.models.base_model import disabled_train +from minigpt4.models.minigpt_base import MiniGPTBase +from minigpt4.models.Qformer import BertConfig, BertLMHeadModel + + +@registry.register_model("minigpt4") +class MiniGPT4(MiniGPTBase): + """ + MiniGPT-4 model + """ + + PRETRAINED_MODEL_CONFIG_DICT = { + "pretrain_vicuna0": "configs/models/minigpt4_vicuna0.yaml", + "pretrain_llama2": "configs/models/minigpt4_llama2.yaml", + } + + def __init__( + self, + vit_model="eva_clip_g", + q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth", + img_size=224, + drop_path_rate=0, + use_grad_checkpoint=False, + vit_precision="fp16", + freeze_vit=True, + has_qformer=True, + freeze_qformer=True, + num_query_token=32, + llama_model="", + prompt_path="", + prompt_template="", + max_txt_len=32, + end_sym='\n', + low_resource=False, # use 8 bit and put 
vit in cpu
+        device_8bit=0,  # the device of 8bit model should be set when loading and cannot be changed anymore.
+        lora_r=64,
+        lora_target_modules=['query_key_value', 'dense'],
+        lora_alpha=16,
+        lora_dropout=0.05,
+    ):
+        super().__init__(
+            vit_model=vit_model,
+            img_size=img_size,
+            drop_path_rate=drop_path_rate,
+            use_grad_checkpoint=use_grad_checkpoint,
+            vit_precision=vit_precision,
+            freeze_vit=freeze_vit,
+            llama_model=llama_model,
+            max_txt_len=max_txt_len,
+            end_sym=end_sym,
+            low_resource=low_resource,
+            device_8bit=device_8bit,
+            lora_r=lora_r,
+            lora_target_modules=lora_target_modules,
+            lora_alpha=lora_alpha,
+            lora_dropout=lora_dropout,
+        )
+
+        self.has_qformer = True  # the Q-Former is always enabled in this implementation
+        if self.has_qformer:
+            print('Loading Q-Former')
+            self.Qformer, self.query_tokens = self.init_Qformer(
+                num_query_token, self.visual_encoder.num_features, freeze_qformer
+            )
+            self.load_from_pretrained(url_or_filename=q_former_model)  # load q-former weights here
+
+            img_f_dim = self.Qformer.config.hidden_size
+            print('Loading Q-Former Done')
+        else:
+            img_f_dim = self.visual_encoder.num_features * 4
+            print('Do not use Q-Former here.')
+        print(img_f_dim, self.llama_model.config.hidden_size)
+        self.llama_proj = nn.Linear(
+            self.Qformer.config.hidden_size, 4096
+        )
+        self.llama_proj2 = nn.Linear(
+            4096, self.llama_model.config.hidden_size
+        )
+
+        if prompt_path:
+            with open(prompt_path, 'r') as f:
+                raw_prompts = f.read().splitlines()
+            filtered_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt]
+            self.prompt_list = [prompt_template.format(p) for p in filtered_prompts]
+            print('Load {} training prompts'.format(len(self.prompt_list)))
+            print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
+        else:
+            self.prompt_list = []
+
+    @classmethod
+    def init_Qformer(cls, num_query_token, vision_width, freeze):
+        encoder_config = BertConfig.from_pretrained("bert-base-uncased")
+        encoder_config.encoder_width = vision_width
+        # insert cross-attention layer every other block
+        encoder_config.add_cross_attention = True
+        encoder_config.cross_attention_freq = 2
+        encoder_config.query_length = num_query_token
+        Qformer = BertLMHeadModel(config=encoder_config)
+        query_tokens = nn.Parameter(
+            torch.zeros(1, num_query_token, encoder_config.hidden_size)
+        )
+        query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
+
+        Qformer.cls = None
+        Qformer.bert.embeddings.word_embeddings = None
+        Qformer.bert.embeddings.position_embeddings = None
+        for layer in Qformer.bert.encoder.layer:
+            layer.output = None
+            layer.intermediate = None
+
+        if freeze:
+            for name, param in Qformer.named_parameters():
+                param.requires_grad = False
+            Qformer = Qformer.eval()
+            Qformer.train = disabled_train
+            query_tokens.requires_grad = False
+            logging.info("freeze Qformer")
+
+        return Qformer, query_tokens
+
+    def encode_img(self, image):
+        device = image.device
+
+        if len(image.shape) > 4:
+            image = image.reshape(-1, *image.shape[-3:])
+
+        with self.maybe_autocast():
+            image_embeds = self.ln_vision(self.visual_encoder(image)).to(device)
+            if self.has_qformer:
+                image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device)
+
+                query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+                query_output = self.Qformer.bert(
+                    query_embeds=query_tokens,
+                    encoder_hidden_states=image_embeds,
+                    encoder_attention_mask=image_atts,
+                    return_dict=True,
+                )
+
+                inputs_llama = self.llama_proj(query_output.last_hidden_state)
+                inputs_llama = self.llama_proj2(inputs_llama)
+
+            else:
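+                # without a Q-Former: drop the CLS token and fold every 4
+                # neighbouring patch embeddings into one wider token before
+                # projecting into the LLM embedding space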
image_embeds = image_embeds[:, 1:, :] + bs, pn, hs = image_embeds.shape + image_embeds = image_embeds.view(bs, int(pn / 4), int(hs * 4)) + + inputs_llama = self.llama_proj(image_embeds) + inputs_llama = self.llama_proj2(inputs_llama) + + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device) + return inputs_llama, atts_llama + + @classmethod + def from_config(cls, cfg): + vit_model = cfg.get("vit_model", "eva_clip_g") + q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth") + img_size = cfg.get("image_size") + num_query_token = cfg.get("num_query_token") + llama_model = cfg.get("llama_model") + + drop_path_rate = cfg.get("drop_path_rate", 0) + use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) + vit_precision = cfg.get("vit_precision", "fp16") + freeze_vit = cfg.get("freeze_vit", True) + has_qformer = cfg.get("has_qformer", True) + freeze_qformer = cfg.get("freeze_qformer", True) + low_resource = cfg.get("low_resource", False) + device_8bit = cfg.get("device_8bit", 0) + + prompt_path = cfg.get("prompt_path", "") + prompt_template = cfg.get("prompt_template", "") + max_txt_len = cfg.get("max_txt_len", 32) + end_sym = cfg.get("end_sym", '\n') + + lora_r = cfg.get("lora_r", 64) + lora_alpha = cfg.get("lora_alpha", 16) + + model = cls( + vit_model=vit_model, + q_former_model=q_former_model, + img_size=img_size, + drop_path_rate=drop_path_rate, + use_grad_checkpoint=use_grad_checkpoint, + vit_precision=vit_precision, + freeze_vit=freeze_vit, + has_qformer=has_qformer, + freeze_qformer=freeze_qformer, + num_query_token=num_query_token, + llama_model=llama_model, + prompt_path=prompt_path, + prompt_template=prompt_template, + max_txt_len=max_txt_len, + end_sym=end_sym, + low_resource=low_resource, + device_8bit=device_8bit, + lora_r=lora_r, + lora_alpha=lora_alpha, + ) + + ckpt_path = cfg.get("ckpt", "") # load weights of MiniGPT-4 + if ckpt_path: + print("Load MiniGPT-4 Checkpoint: {}".format(ckpt_path)) + ckpt = torch.load(ckpt_path, map_location="cpu") + msg = model.load_state_dict(ckpt['model'], strict=False) + + return model diff --git a/minigpt4/models/minigpt_base.py b/minigpt4/models/minigpt_base.py new file mode 100644 index 0000000000000000000000000000000000000000..34183b781959173bc9af38945f046f17ea0d2153 --- /dev/null +++ b/minigpt4/models/minigpt_base.py @@ -0,0 +1,411 @@ +import logging +import random + +import torch +from torch.cuda.amp import autocast as autocast +import torch.nn as nn + +from minigpt4.common.registry import registry +from minigpt4.models.base_model import BaseModel +from transformers import StoppingCriteria, StoppingCriteriaList + +from minigpt4.conversation.conversation import StoppingCriteriaSub + +class MiniGPTBase(BaseModel): + """ + Base class for MiniGPT-4 and MiniGPT-v2 + """ + + def __init__( + self, + vit_model="eva_clip_g", + img_size=224, + drop_path_rate=0, + use_grad_checkpoint=False, + vit_precision="fp16", + freeze_vit=True, + llama_model="", + max_txt_len=32, + max_context_len=3800, + prompt_template="", + end_sym='\n', + low_resource=False, # use 8 bit and put vit in cpu + device_8bit=0, # the device of 8bit model should be set when loading and cannot be changed anymore. 
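+        # lora_r > 0 enables LoRA adapters in init_llm; with lora_r == 0 the
+        # LLM is frozen apart from its layer norms (see base_model.init_llm)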
+        lora_r=0,  # lora_r == 0 means LoRA is not used
+        lora_target_modules=["q_proj", "v_proj"],
+        lora_alpha=16,
+        lora_dropout=0.05,
+    ):
+        super().__init__()
+
+        self.llama_model, self.llama_tokenizer = self.init_llm(
+            llama_model_path=llama_model,
+            low_resource=low_resource,
+            low_res_device=device_8bit,
+            lora_r=lora_r,
+            lora_target_modules=lora_target_modules,
+            lora_alpha=lora_alpha,
+            lora_dropout=lora_dropout,
+        )
+
+        self.visual_encoder, self.ln_vision = self.init_vision_encoder(
+            vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision, freeze_vit
+        )
+
+        self.max_txt_len = max_txt_len
+        self.max_context_len = max_context_len
+        self.end_sym = end_sym
+
+        self.prompt_template = prompt_template
+        self.prompt_list = []
+
+    def vit_to_cpu(self):
+        self.ln_vision.to("cpu")
+        self.ln_vision.float()
+        self.visual_encoder.to("cpu")
+        self.visual_encoder.float()
+
+    def get_context_emb(self, prompt, img_list):
+        device = img_list[0].device
+        prompt_segs = prompt.split('<ImageHere>')
+        assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images."
+        seg_tokens = [
+            self.llama_tokenizer(
+                seg, return_tensors="pt", add_special_tokens=i == 0).to(device).input_ids  # only add bos to the first seg
+            for i, seg in enumerate(prompt_segs)
+        ]
+        seg_embs = [self.embed_tokens(seg_t) for seg_t in seg_tokens]
+
+        mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]
+        mixed_embs = torch.cat(mixed_embs, dim=1)
+        return mixed_embs
+
+    def prompt_wrap(self, img_embeds, atts_img, prompts, lengths=None):
+        if prompts is None or len(prompts) == 0:
+            # prompts is not provided, just return the original image embedding
+            return img_embeds, atts_img
+        elif img_embeds is None:
+            # prompt is provided but there is no image embedding; return the prompt embedding in right padding
+            self.llama_tokenizer.padding_side = "right"
+            prompt_tokens = self.llama_tokenizer(
+                prompts,
+                return_tensors="pt",
+                padding="longest",
+                add_special_tokens=False
+            ).to(self.device)
+            prompt_embeds = self.embed_tokens(prompt_tokens.input_ids)
+            atts_prompt = prompt_tokens.attention_mask
+            return prompt_embeds, atts_prompt
+        else:
+            # return the multi-modal embedding in right padding
+            emb_lists = []
+            if isinstance(prompts, str):
+                prompts = [prompts] * len(img_embeds)
+
+            for idx, (each_img_embed, each_prompt) in enumerate(zip(img_embeds, prompts)):
+                pn = each_img_embed.shape[-2]
+                if lengths is not None:
+                    each_img_embed = each_img_embed.reshape(-1, each_img_embed.shape[-1])
+                    each_img_embed = each_img_embed[:lengths[idx] * pn]
+                p_segs = each_prompt.split('<ImageHere>')
+                interleave_emb = []
+                for idx, seg in enumerate(p_segs[:-1]):
+                    p_tokens = self.llama_tokenizer(
+                        seg, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
+                    p_embed = self.embed_tokens(p_tokens.input_ids)
+                    interleave_emb.append(torch.cat([p_embed, each_img_embed[None][:, idx * pn:(idx + 1) * pn]], dim=1))
+                wrapped_emb = torch.cat(interleave_emb, dim=1)
+                p_tokens = self.llama_tokenizer(
+                    p_segs[-1], return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
+                p_embed = self.embed_tokens(p_tokens.input_ids)
+                wrapped_emb = torch.cat([wrapped_emb, p_embed], dim=1)
+                emb_lists.append(wrapped_emb)
+
+            emb_lens = [emb.shape[1] for emb in emb_lists]
+            pad_emb = self.embed_tokens(torch.tensor(self.llama_tokenizer.pad_token_id, device=img_embeds.device))
+
+            max_length = max(emb_lens) if max(emb_lens) < self.max_context_len else self.max_context_len
+            wrapped_embs = pad_emb.expand(len(emb_lens), max_length, -1).clone()
+            wrapped_atts = torch.zeros([len(emb_lens), max_length], dtype=torch.int, device=img_embeds.device)
+
+            for i, emb in enumerate(emb_lists):
+                length = emb_lens[i] if emb_lens[i] < self.max_context_len else self.max_context_len
+                wrapped_embs[i, :length] = emb[:, :length]
+                wrapped_atts[i, :length] = 1
+            return wrapped_embs, wrapped_atts
+
+    def concat_emb_input_output(self, input_embs, input_atts, output_embs, output_atts):
+        """
+        Concatenate the batched input embedding and batched output embedding together.
+        Both the input and the output embedding should be right padded.
+ """ + input_lens = [] + cat_embs = [] + cat_atts = [] + for i in range(input_embs.size(0)): + input_len = input_atts[i].sum() + input_lens.append(input_len) + cat_embs.append( + torch.cat([ + input_embs[i][:input_len], + output_embs[i], + input_embs[i][input_len:] + ]) + ) + cat_atts.append( + torch.cat([ + input_atts[i][:input_len], + output_atts[i], + input_atts[i][input_len:] + ]) + ) + cat_embs = torch.stack(cat_embs) + cat_atts = torch.stack(cat_atts) + return cat_embs, cat_atts, input_lens + + def tokenize_conversation(self, conv_q, conv_a): + """concatenate conversation and make sure the model is only trained to regress the answer""" + + to_regress_token_ids_list = [] + targets_list = [] + + batch_size = len(conv_q) + for batch_idx in range(batch_size): + questions, answers = conv_q[batch_idx], conv_a[batch_idx] + questions = [self.llama_tokenizer(self.llama_tokenizer.bos_token + q, + return_tensors="pt", + add_special_tokens=False).to(self.device) for q in questions[1:]] # the first question is handled in the prompt wrap function, skip it + answers = [self.llama_tokenizer(a + self.end_sym, + return_tensors="pt", + add_special_tokens=False).to(self.device) for a in answers] + cur_id = [] + cur_target = [] + for i in range(len(questions)): + cur_id.append(answers[i].input_ids) + cur_target.append(answers[i].input_ids) + cur_id.append(questions[i].input_ids) + cur_target.append(torch.ones_like(questions[i].input_ids) * -100) + + cur_id.append(answers[-1].input_ids) + cur_target.append(answers[-1].input_ids) + + cur_id = torch.cat(cur_id, dim=1) + cur_target = torch.cat(cur_target, dim=1) + to_regress_token_ids_list.append(cur_id) + targets_list.append(cur_target) + + max_len = min(max([target.shape[1] for target in targets_list]), self.max_txt_len) + to_regress_token_ids = torch.ones([batch_size, max_len], + dtype=cur_id.dtype, device=self.device) * self.llama_tokenizer.pad_token_id + targets = torch.ones([batch_size, max_len], + dtype=cur_id.dtype, device=self.device) * -100 + for batch_idx in range(batch_size): + cur_len = to_regress_token_ids_list[batch_idx].shape[1] + to_regress_token_ids[batch_idx, :cur_len] = to_regress_token_ids_list[batch_idx][0, :max_len] + targets[batch_idx, :cur_len] = targets_list[batch_idx][0, :max_len] + + to_regress_token_attn = (to_regress_token_ids != self.llama_tokenizer.pad_token_id).to(torch.int) + + return to_regress_token_ids, to_regress_token_attn, targets + + def preparing_embedding(self, samples): + ### prepare input tokens + if 'image' in samples: + img_embeds, img_atts = self.encode_img(samples["image"]) + else: + img_embeds = img_atts = None + + if 'conv_q' in samples: + # handeling conversation datasets + conv_q, conv_a = samples['conv_q'], samples['conv_a'] + + connect_sym = samples['connect_sym'][0] + conv_q = [q.split(connect_sym)for q in conv_q] + conv_a = [a.split(connect_sym) for a in conv_a] + + conv_q = [[self.prompt_template.format(item) for item in items] for items in conv_q] + + cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, [q[0] for q in conv_q]) + regress_token_ids, regress_atts, part_targets = self.tokenize_conversation(conv_q, conv_a) + + else: + if "instruction_input" in samples: + instruction = samples["instruction_input"] + elif self.prompt_list: + instruction = random.choice(self.prompt_list) + else: + instruction = None + + if hasattr(self, 'chat_template') and self.chat_template: + instruction = [self.prompt_template.format(instruct) for instruct in instruction] + + if 'length' in samples: + # the 
input is a image train (like videos) + bsz, pn, hs = img_embeds.shape + img_embeds = img_embeds.reshape(len(samples['image']), -1, pn, hs) + cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction, samples['length']) + else: + cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction) + + ### prepare target tokens + self.llama_tokenizer.padding_side = "right" + text = [t + self.end_sym for t in samples["answer"]] + + regress_tokens = self.llama_tokenizer( + text, + return_tensors="pt", + padding="longest", + truncation=True, + max_length=self.max_txt_len, + add_special_tokens=False + ).to(self.device) + + regress_token_ids = regress_tokens.input_ids + regress_atts = regress_tokens.attention_mask + part_targets = regress_token_ids.masked_fill( + regress_token_ids == self.llama_tokenizer.pad_token_id, -100 + ) + + regress_embeds = self.embed_tokens(regress_token_ids) + + return cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets + + def forward(self, samples,): + # prepare the embedding to condition and the embedding to regress + cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets = \ + self.preparing_embedding(samples) + + # concat the embedding to condition and the embedding to regress + inputs_embeds, attention_mask, input_lens = \ + self.concat_emb_input_output(cond_embeds, cond_atts, regress_embeds, regress_atts) + + # get bos token embedding + bos = torch.ones_like(part_targets[:, :1]) * self.llama_tokenizer.bos_token_id + bos_embeds = self.embed_tokens(bos) + bos_atts = cond_atts[:, :1] + + # add bos token at the begining + inputs_embeds = torch.cat([bos_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_atts, attention_mask], dim=1) + + # ensemble the final targets + targets = torch.ones([inputs_embeds.shape[0], inputs_embeds.shape[1]], + dtype=torch.long).to(self.device).fill_(-100) + + for i, target in enumerate(part_targets): + targets[i, input_lens[i]+1:input_lens[i]+len(target)+1] = target # plus 1 for bos + + with self.maybe_autocast(): + outputs = self.llama_model( + input_ids = None, + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True, + labels=targets, + ) + loss = outputs.loss + + return {"loss": loss} + + def embed_tokens(self, token_ids): + if hasattr(self.llama_model.base_model, 'model'): ## lora wrapped model + embeds = self.llama_model.model.model.embed_tokens(token_ids) + else: + embeds = self.llama_model.model.embed_tokens(token_ids) + return embeds + + @torch.no_grad() + def generate( + self, + images, + texts, + num_beams=1, + max_new_tokens=20, + min_length=1, + top_p=0.9, + repetition_penalty=1, + length_penalty=1, + temperature=1, + do_sample=False, + stop_words_ids=[2], + ): + ''' + function for generate test use + ''' + + stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub( + stops=[torch.tensor([i]).to(self.device) for i in stop_words_ids])]) + + img_embeds, atts_img = self.encode_img(images.to(self.device)) + image_lists = [[image_emb[None]] for image_emb in img_embeds] + + batch_embs = [self.get_context_emb(text, img_list) for text, img_list in zip(texts, image_lists)] + + batch_size = len(batch_embs) + max_len = max([emb.shape[1] for emb in batch_embs]) + emb_dim = batch_embs[0].shape[2] + dtype = batch_embs[0].dtype + device = batch_embs[0].device + + embs = torch.zeros([batch_size, max_len, emb_dim], dtype=dtype, device=device) + attn_mask = torch.zeros([batch_size, max_len], dtype=torch.int, device=device) + for i, emb in 
enumerate(batch_embs): + emb_len = emb.shape[1] + embs[i, -emb_len:] = emb[0] + attn_mask[i, -emb_len:] = 1 + + with self.maybe_autocast(): + outputs = self.llama_model.generate( + inputs_embeds=embs, + attention_mask=attn_mask, + max_new_tokens=max_new_tokens, + num_beams=num_beams, + length_penalty=length_penalty, + temperature=temperature, + do_sample=do_sample, + min_length=min_length, + top_p=top_p, + repetition_penalty=repetition_penalty, + bos_token_id = 50256 + # stopping_criteria=stopping_criteria, + ) + + # with self.maybe_autocast(): + # outputs = self.llama_model.generate( + # inputs_embeds=embs, + # attention_mask=attn_mask, + # max_new_tokens=max_new_tokens, + # num_beams=num_beams, + # do_sample=do_sample, + # # stopping_criteria=stopping_criteria, + # ) + answers = [] + for output_token in outputs: + if output_token[0] == 0: + output_token = output_token[1:] + output_texts = self.llama_tokenizer.decode(output_token, skip_special_tokens=True) + output_texts = output_texts.split('')[0] # remove the stop sign + output_texts = output_texts.replace("", "") + output_texts = output_texts.split(r'[/INST]')[-1].strip() + answers.append(output_texts) + + return answers + + @torch.no_grad() + def multi_select(self, images, texts, answers, num_cand=None): + all_losses = [] + for answer in answers: + choice_samples = { + 'image': images, + 'instruction_input': texts, + 'answer': answer + } + loss = self.forward(choice_samples)['loss'].reshape(-1, 1) + all_losses.append(loss) + torch.cuda.empty_cache() + all_losses = torch.cat(all_losses, dim=-1) + if num_cand is not None: + for i in range(all_losses.shape[0]): + all_losses[i, num_cand[i]:] = 9999 + output_class_ranks = torch.argsort(all_losses, dim=-1) + return output_class_ranks.tolist() diff --git a/minigpt4/models/minigpt_v2.py b/minigpt4/models/minigpt_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..01d7a9517a6ed7d3491d03abc692e1b7d9a913cd --- /dev/null +++ b/minigpt4/models/minigpt_v2.py @@ -0,0 +1,195 @@ +import logging +import random + +import torch +from torch.cuda.amp import autocast as autocast +import torch.nn as nn + +from minigpt4.common.registry import registry +from minigpt4.models.base_model import disabled_train +from minigpt4.models.minigpt_base import MiniGPTBase +from minigpt4.models.Qformer import BertConfig, BertLMHeadModel + + +@registry.register_model("minigpt_v2") +class MiniGPTv2(MiniGPTBase): + """ + MiniGPT-v2 model + """ + + PRETRAINED_MODEL_CONFIG_DICT = { + "pretrain": "configs/models/minigpt_v2.yaml", + } + + def __init__( + self, + vit_model="eva_clip_g", + img_size=448, + drop_path_rate=0, + use_grad_checkpoint=False, + vit_precision="fp16", + freeze_vit=True, + llama_model="", + prompt_template='###Human: {} ###Assistant: ', + max_txt_len=300, + end_sym='\n', + lora_r=64, + lora_target_modules=['query_key_value','dense'], + lora_alpha=16, + lora_dropout=0.05, + chat_template=False, + use_grad_checkpoint_llm=False, + max_context_len=3800, + low_resource=False, # use 8 bit and put vit in cpu + device_8bit=0, # the device of 8bit model should be set when loading and cannot be changed anymore. 
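+        # NOTE: unlike MiniGPT4 above, the Q-Former here is built with
+        # freeze=False (see the init_Qformer call below), so it is trained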
+ ): + super().__init__( + vit_model=vit_model, + img_size=img_size, + drop_path_rate=drop_path_rate, + use_grad_checkpoint=use_grad_checkpoint, + vit_precision=vit_precision, + freeze_vit=freeze_vit, + llama_model=llama_model, + max_txt_len=max_txt_len, + max_context_len=max_context_len, + end_sym=end_sym, + prompt_template=prompt_template, + low_resource=low_resource, + device_8bit=device_8bit, + lora_r=lora_r, + lora_target_modules=lora_target_modules, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + ) + + print('Loading Q-Former') + self.Qformer, self.query_tokens = self.init_Qformer( + num_query_token = 32, vision_width = self.visual_encoder.num_features, freeze = False + ) + self.load_from_pretrained(url_or_filename="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth") # load q-former weights here + + img_f_dim = self.Qformer.config.hidden_size + print('Loading Q-Former Done') + + # img_f_dim = self.visual_encoder.num_features * 4 + self.llama_proj = nn.Linear( + self.Qformer.config.hidden_size, 4096 + ) + self.llama_proj2 = nn.Linear( + 4096, self.llama_model.config.hidden_size + ) + self.chat_template = chat_template + + if use_grad_checkpoint_llm: + self.llama_model.gradient_checkpointing_enable() + + @classmethod + def init_Qformer(cls, num_query_token, vision_width, freeze): + encoder_config = BertConfig.from_pretrained("bert-base-uncased") + encoder_config.encoder_width = vision_width + # insert cross-attention layer every other block + encoder_config.add_cross_attention = True + encoder_config.cross_attention_freq = 2 + encoder_config.query_length = num_query_token + Qformer = BertLMHeadModel(config=encoder_config) + query_tokens = nn.Parameter( + torch.zeros(1, num_query_token, encoder_config.hidden_size) + ) + query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) + + Qformer.cls = None + Qformer.bert.embeddings.word_embeddings = None + Qformer.bert.embeddings.position_embeddings = None + for layer in Qformer.bert.encoder.layer: + layer.output = None + layer.intermediate = None + + if freeze: + for name, param in Qformer.named_parameters(): + param.requires_grad = False + Qformer = Qformer.eval() + Qformer.train = disabled_train + query_tokens.requires_grad = False + logging.info("freeze Qformer") + + return Qformer, query_tokens + + def encode_img(self, image): + device = image.device + + if len(image.shape) > 4: + image = image.reshape(-1, *image.shape[-3:]) + + with self.maybe_autocast(): + image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) + # image_embeds = image_embeds[:, 1:, :] + # bs, pn, hs = image_embeds.shape + # image_embeds = image_embeds.view(bs, int(pn / 4), int(hs * 4)) + + # inputs_llama = self.llama_proj(image_embeds) + # atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device) + image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device) + + query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) + query_output = self.Qformer.bert( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + inputs_llama = self.llama_proj(query_output.last_hidden_state) + inputs_llama = self.llama_proj2(inputs_llama) + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device) + return inputs_llama, atts_llama + + @classmethod + def from_config(cls, cfg): + vit_model = cfg.get("vit_model", "eva_clip_g") + img_size = 
cfg.get("image_size") + llama_model = cfg.get("llama_model") + + drop_path_rate = cfg.get("drop_path_rate", 0) + use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) + vit_precision = cfg.get("vit_precision", "fp16") + freeze_vit = cfg.get("freeze_vit", True) + low_resource = cfg.get("low_resource", False) + + prompt_template = cfg.get("prompt_template", '[INST] {} [/INST]') + max_txt_len = cfg.get("max_txt_len", 300) + end_sym = cfg.get("end_sym", '\n') + + lora_r = cfg.get("lora_r", 64) + lora_alpha = cfg.get("lora_alpha", 16) + chat_template = cfg.get("chat_template", False) + + use_grad_checkpoint_llm = cfg.get("use_grad_checkpoint_llm", False) + max_context_len = cfg.get("max_context_len", 3800) + + model = cls( + vit_model=vit_model, + img_size=img_size, + drop_path_rate=drop_path_rate, + use_grad_checkpoint=use_grad_checkpoint, + vit_precision=vit_precision, + freeze_vit=freeze_vit, + llama_model=llama_model, + prompt_template=prompt_template, + max_txt_len=max_txt_len, + low_resource=low_resource, + end_sym=end_sym, + lora_r=lora_r, + lora_alpha=lora_alpha, + chat_template=chat_template, + use_grad_checkpoint_llm=use_grad_checkpoint_llm, + max_context_len=max_context_len, + ) + + ckpt_path = cfg.get("ckpt", "") # load weights of MiniGPT-4 + if ckpt_path: + print("Load Minigpt-4-LLM Checkpoint: {}".format(ckpt_path)) + ckpt = torch.load(ckpt_path, map_location="cpu") + msg = model.load_state_dict(ckpt['model'], strict=False) + + return model diff --git a/minigpt4/models/modeling_llama.py b/minigpt4/models/modeling_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..5d59a53faf45ef55cf127714489201d84a9364d9 --- /dev/null +++ b/minigpt4/models/modeling_llama.py @@ -0,0 +1,111 @@ +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss + +from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers.models.llama.modeling_llama import LLAMA_INPUTS_DOCSTRING, _CONFIG_FOR_DOC +from transformers.models.llama.modeling_llama import LlamaForCausalLM as LlamaForCausalLMOrig + + +class LlamaForCausalLM(LlamaForCausalLMOrig): + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + reduction: Optional[str] = "mean", + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
+ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if hasattr(self.config, 'pretraining_tp') and self.config.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(reduction=reduction) + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + if reduction == "none": + loss = loss.view(logits.size(0), -1).mean(1) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/minigpt4/models/modeling_phi.py b/minigpt4/models/modeling_phi.py new file mode 100644 index 0000000000000000000000000000000000000000..607fbb7118909dea029f3af6102d31ed359c380a --- /dev/null +++ b/minigpt4/models/modeling_phi.py @@ -0,0 +1,1052 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +# +# Copyright (c) 2022, Tri Dao, trid@cs.stanford.edu. +# Licensed under the BSD 3-Clause License. 
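+#
+# A minimal usage sketch for the classes defined in this file (illustrative
+# only; it assumes the `PhiConfig` in configuration_phi.py is constructible
+# with its defaults, which a real checkpoint would normally override):
+#
+# >>> import torch
+# >>> from minigpt4.models.configuration_phi import PhiConfig
+# >>> from minigpt4.models.modeling_phi import PhiForCausalLM
+# >>> cfg = PhiConfig()
+# >>> model = PhiForCausalLM(cfg).eval()
+# >>> ids = torch.randint(0, cfg.vocab_size, (1, 8))
+# >>> out = model(input_ids=ids)  # CausalLMOutputWithPast
+# >>> out.logits.shape            # torch.Size([1, 8, vocab_size])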
+ +from __future__ import annotations + +import math +from dataclasses import dataclass, field +from typing import Any, Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn +from einops import rearrange, repeat +from transformers import PretrainedConfig, PreTrainedModel +from transformers.activations import ACT2FN +from transformers.modeling_outputs import CausalLMOutputWithPast + +from .configuration_phi import PhiConfig + +try: + from flash_attn.bert_padding import pad_input, unpad_input + from flash_attn.layers.rotary import RotaryEmbedding as FlashRotaryEmbedding + from flash_attn.modules.mha import FlashCrossAttention, FlashSelfAttention + from flash_attn.ops.fused_dense import FusedDense +except: + pad_input, unpad_input = None, None + FlashRotaryEmbedding = None + FlashSelfAttention, FlashCrossAttention = None, None + FusedDense = None + + +@dataclass +class InferenceParams: + """Inference parameters passed to model to efficiently calculate + and store context during inference. + + Reference: + https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/utils/generation.py. + + Args: + max_seqlen: Maximum sequence length. + max_batch_size: Maximum batch size. + seqlen_offset: Sequence length offset. + batch_size_offset: Batch size offset. + key_value_memory_dict: Key value memory dictionary. + lengths_per_sample: Lengths per sample. + + """ + + max_seqlen: int = field(metadata={"help": "Maximum sequence length."}) + + max_batch_size: int = field(metadata={"help": "Maximum batch size."}) + + seqlen_offset: int = field(default=0, metadata={"help": "Sequence length offset."}) + + batch_size_offset: int = field(default=0, metadata={"help": "Batch size offset."}) + + key_value_memory_dict: Dict[str, Any] = field( + default_factory=dict, metadata={"help": "Key value memory dictionary."} + ) + + lengths_per_sample: torch.Tensor = field(default=None, metadata={"help": "Lengths per sample."}) + + +class Embedding(nn.Module): + """Token embedding with dropout.""" + + def __init__(self, config: PretrainedConfig) -> None: + super().__init__() + + self.wte = nn.Embedding(config.vocab_size, config.n_embd) + self.drop = nn.Dropout(config.embd_pdrop) + + def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor: + hidden_states = self.wte(input_ids) + hidden_states = self.drop(hidden_states) + + return hidden_states + + +def _apply_rotary_emb( + x: torch.FloatTensor, + cos: torch.FloatTensor, + sin: torch.FloatTensor, +) -> torch.FloatTensor: + _, seqlen, _, _ = x.shape + _, rotary_dim = cos.shape + rotary_dim *= 2 + + x_rot = x[:, :, :, :rotary_dim] + x_pass = x[:, :, :, rotary_dim:] + + x1, x2 = x_rot.chunk(2, dim=-1) + c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d") + x1, x2, c, s = [t.to(dtype=torch.float32) for t in [x1, x2, c, s]] + + x_rot = torch.cat([x1 * c - x2 * s, x1 * s + x2 * c], axis=-1).to(x.dtype) + + return torch.cat([x_rot, x_pass], axis=-1) + + +def _apply_rotary_emb_kv( + kv: torch.FloatTensor, + cos: torch.FloatTensor, + sin: torch.FloatTensor, + cos_k: Optional[torch.FloatTensor] = None, + sin_k: Optional[torch.FloatTensor] = None, +) -> torch.FloatTensor: + _, seqlen, _, _, _ = kv.shape + _, rotary_dim = cos.shape + rotary_dim *= 2 + + k_rot = kv[:, :, 0, :, :rotary_dim] + k_pass = kv[:, :, 0, :, rotary_dim:] + + k1, k2 = k_rot.chunk(2, dim=-1) + c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d") + k1, k2, c, s = [t.to(dtype=torch.float32) for t in [k1, k2, c, s]] + + 
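+    # Rotate each (k1, k2) pair of the key halves by the position-dependent
+    # angle from the cached cos/sin tables: (k1, k2) -> (k1*c - k2*s, k1*s + k2*c).
+    # The arithmetic runs in fp32 (see the cast above) and is cast back to the
+    # kv dtype below.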
k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(kv.dtype) + + return torch.cat( + [ + torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2), + kv[:, :, 1:2, :, :], + ], + axis=2, + ) + + +def _apply_rotary_emb_qkv( + qkv: torch.FloatTensor, + cos: torch.FloatTensor, + sin: torch.FloatTensor, + cos_k: Optional[torch.FloatTensor] = None, + sin_k: Optional[torch.FloatTensor] = None, +) -> torch.FloatTensor: + _, seqlen, _, _, _ = qkv.shape + _, rotary_dim = cos.shape + rotary_dim *= 2 + + q_rot = qkv[:, :, 0, :, :rotary_dim] + q_pass = qkv[:, :, 0, :, rotary_dim:] + + k_rot = qkv[:, :, 1, :, :rotary_dim] + k_pass = qkv[:, :, 1, :, rotary_dim:] + + q1, q2 = q_rot.chunk(2, dim=-1) + k1, k2 = k_rot.chunk(2, dim=-1) + c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d") + q1, q2, k1, k2, c, s = [t.to(dtype=torch.float32) for t in [q1, q2, k1, k2, c, s]] + + q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], axis=-1).to(qkv.dtype) + k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(qkv.dtype) + + return torch.cat( + [ + torch.cat([q_rot, q_pass], axis=-1).unsqueeze(2), + torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2), + qkv[:, :, 2:3, :, :], + ], + axis=2, + ) + + +class RotaryEmbedding(nn.Module): + """Rotary positional embedding (RoPE). + + Reference: + RoFormer: Enhanced Transformer with Rotary Position Embedding. + https://arxiv.org/pdf/2104.09864.pdf. + + """ + + def __init__( + self, + dim: int, + base: int = 10000, + scale_base: Optional[float] = None, + pos_idx_in_fp32: bool = True, + max_position_embeddings: int = 2048, + device: Optional[str] = None, + **kwargs, + ) -> None: + super().__init__() + + if scale_base is not None: + raise NotImplementedError + + self.dim = dim + self.base = float(base) + self.scale_base = scale_base + self.pos_idx_in_fp32 = pos_idx_in_fp32 + self.max_position_embeddings = max_position_embeddings + self.device = device + + # Generate and save the inverse frequency buffer (non-trainable) + inv_freq = self._compute_inv_freq(device) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Generate and save the scale buffer (non-trainable) + scale = ( + (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim) + if scale_base is not None + else None + ) + self.register_buffer("scale", scale, persistent=False) + + # Initialize cached attributes since ONNX can't rely on dynamic initialization + self._update_cos_sin_cache(max_position_embeddings, device=device, dtype=torch.float32) + + def _compute_inv_freq(self, device: Optional[str] = None) -> torch.FloatTensor: + return 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)) + + def _update_cos_sin_cache( + self, + seqlen: int, + device: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + ) -> None: + self._seq_len_cached = seqlen + + # fp32 is preferred since the output of `torch.arange` can be quite large + # and bf16 would lose a lot of precision + if self.pos_idx_in_fp32: + t = torch.arange(seqlen, device=device, dtype=torch.float32) + if self.inv_freq.dtype != torch.float32: + inv_freq = self._compute_inv_freq(device=device) + else: + inv_freq = self.inv_freq + else: + t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) + inv_freq = self.inv_freq + + # `torch.outer` is preferred since `torch.einsum` converts from fp32 to fp16 if used with AMP + freqs = torch.outer(t, inv_freq) + if self.scale is None: + self._cos_cached = 
torch.cos(freqs).to(dtype) + self._sin_cached = torch.sin(freqs).to(dtype) + else: + power = ( + torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2 + ) / self.scale_base + scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1") + + # Force the scale multiplication to happen in fp32 + self._cos_cached = (torch.cos(freqs) * scale).to(dtype) + self._sin_cached = (torch.sin(freqs) * scale).to(dtype) + self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype) + self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype) + + def forward( + self, + qkv: torch.Tensor, + kv: Optional[torch.Tensor] = None, + seqlen_offset: int = 0, + **kwargs, + ) -> Tuple[torch.Tensor, torch.Tensor]: + if ( + self._seq_len_cached < qkv.shape[1] + seqlen_offset + or self._cos_cached.device != qkv.device + or self._cos_cached.dtype != qkv.dtype + or (self.training and self._cos_cached.is_inference()) + ): + self._update_cos_sin_cache(qkv.shape[1] + seqlen_offset, device=qkv.device, dtype=qkv.dtype) + + if kv is None: + return _apply_rotary_emb_qkv( + qkv, + self._cos_cached[seqlen_offset:], + self._sin_cached[seqlen_offset:], + ) + else: + q = _apply_rotary_emb( + qkv, + self._cos_cached[seqlen_offset:], + self._sin_cached[seqlen_offset:], + ) + kv = _apply_rotary_emb_kv( + kv, + self._cos_cached[seqlen_offset:], + self._sin_cached[seqlen_offset:], + ) + + return q, kv + + +class MLP(nn.Module): + """Multi-Layer Perceptron. + + Reference: + Attention Is All You Need. + https://arxiv.org/pdf/1706.03762.pdf. + + """ + + def __init__( + self, + config: PretrainedConfig, + n_inner: Optional[int] = None, + act_fn: Optional[str] = None, + ) -> None: + super().__init__() + + act_fn = config.activation_function if act_fn is None else act_fn + + n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner + n_inner = n_inner if n_inner is not None else 4 * config.n_embd + + self.fc1 = nn.Linear(config.n_embd, n_inner) + self.fc2 = nn.Linear(n_inner, config.n_embd) + self.act = ACT2FN[act_fn] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.fc2(hidden_states) + + return hidden_states + + +class SelfAttention(nn.Module): + """Self-attention layer (compatible with PyTorch). + + Reference: + https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py. 
+ + """ + + def __init__( + self, + causal: bool = True, + softmax_scale: Optional[float] = None, + attention_dropout: float = 0.0, + ) -> None: + super().__init__() + + self.causal = causal + self.softmax_scale = softmax_scale + self.drop = nn.Dropout(attention_dropout) + + @torch.autocast("cpu", enabled=False) + @torch.autocast("cuda", enabled=False) + def forward( + self, + qkv: torch.FloatTensor, + causal: bool = None, + key_padding_mask: Optional[torch.BoolTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + batch_size, seqlen = qkv.shape[0], qkv.shape[1] + q, k, v = qkv.unbind(dim=2) + + q = q.to(torch.float32) + k = k.to(torch.float32) + + causal = self.causal if causal is None else causal + softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1]) + + # Autocast is manually disabled to avoid `torch.einsum` performing the operation + # using float16, which might lead to overflow + scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale) + + if key_padding_mask is not None: + padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device) + padding_mask.masked_fill_(key_padding_mask, 0.0) + + scores = scores + rearrange(padding_mask, "b s -> b 1 1 s") + + if causal: + causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1) + scores = scores + causal_mask.to(dtype=scores.dtype) + + attention = torch.softmax(scores, dim=-1).to(v.dtype) + attention = self.drop(attention) + + output = torch.einsum("bhts,bshd->bthd", attention, v) + + return output + + +class CrossAttention(nn.Module): + """Cross-attention layer (compatible with PyTorch). + + Reference: + https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py. + + """ + + def __init__( + self, + causal: bool = True, + softmax_scale: Optional[float] = None, + attention_dropout: float = 0.0, + ) -> None: + super().__init__() + + self.causal = causal + self.softmax_scale = softmax_scale + self.drop = nn.Dropout(attention_dropout) + + @torch.autocast("cpu", enabled=False) + @torch.autocast("cuda", enabled=False) + def forward( + self, + q: torch.FloatTensor, + kv: torch.FloatTensor, + causal: bool = None, + key_padding_mask: Optional[torch.BoolTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + batch_size, seqlen_q = q.shape[0], q.shape[1] + seqlen_k = kv.shape[1] + + if kv.shape[3] != q.shape[2]: + kv = repeat(kv, "... hkv d -> ... 
(hkv g) d", g=q.shape[2] // kv.shape[3]) + k, v = kv.unbind(dim=2) + + q = q.to(torch.float32) + k = k.to(torch.float32) + + causal = self.causal if causal is None else causal + softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1]) + + # Autocast is manually disabled to avoid `torch.einsum` performing the operation + # using float16, which might lead to overflow + scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale) + + if key_padding_mask is not None: + padding_mask = torch.full( + (batch_size, seqlen_k), + -10000.0, + dtype=scores.dtype, + device=scores.device, + ) + padding_mask.masked_fill_(key_padding_mask, 0.0) + + scores = scores + rearrange(padding_mask, "b s -> b 1 1 s") + + if causal: + rows = rearrange(torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1") + cols = torch.arange(seqlen_k, device=k.device, dtype=torch.long) + causal_mask = cols > rows + seqlen_k - seqlen_q + + scores = scores.masked_fill(causal_mask, -10000.0) + + attention = torch.softmax(scores, dim=-1).to(v.dtype) + attention = self.drop(attention) + + output = torch.einsum("bhts,bshd->bthd", attention, v) + + return output + + +def _find_mha_dims( + config: PretrainedConfig, + n_head: Optional[int] = None, + n_head_kv: Optional[int] = None, + head_dim: Optional[int] = None, +) -> Tuple[int, int]: + if n_head is None and head_dim is None: + head_dim = config.n_embd // config.n_head + n_head = config.n_head + elif n_head is None or head_dim is None: + raise ValueError("`n_head` and `head_dim` must be both specified or `None`.") + + if n_head_kv is None: + n_head_kv = getattr(config, "n_head_kv", None) or n_head + + return n_head, n_head_kv, head_dim + + +def _update_kv_cache(kv: torch.FloatTensor, inference_params: InferenceParams, layer_idx: int) -> torch.FloatTensor: + num_heads, head_dim = kv.shape[-2:] + + if layer_idx not in inference_params.key_value_memory_dict: + inference_params.key_value_memory_dict[layer_idx] = torch.empty( + inference_params.max_batch_size, + inference_params.max_seqlen, + 2, + num_heads, + head_dim, + dtype=kv.dtype, + device=kv.device, + ) + + batch_start = inference_params.batch_size_offset + batch_end = batch_start + kv.shape[0] + + sequence_start = inference_params.seqlen_offset + sequence_end = sequence_start + kv.shape[1] + + # When the current sequence length is equal to or larger than the maximum sequence length, + # we need to concatenate the current `kv` with the cached `kv` to expand its length + if sequence_end >= inference_params.max_seqlen: + inference_params.key_value_memory_dict[layer_idx] = torch.concatenate((inference_params.key_value_memory_dict[layer_idx], kv), dim=1) + + inference_params.key_value_memory_dict[layer_idx][batch_start:batch_end, sequence_start:sequence_end, ...] = kv + kv = inference_params.key_value_memory_dict[layer_idx][batch_start:batch_end, :sequence_end, ...] 
+
+    return kv
+
+
+class MHA(nn.Module):
+    """Multi-head attention layer."""
+
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        dtype: Optional[torch.dtype] = None,
+        device: Optional[str] = None,
+        rotary_dim: Optional[int] = None,
+        rotary_base: float = 10000.0,
+        rotary_scale_base: Optional[float] = None,
+        n_head: Optional[int] = None,
+        n_head_kv: Optional[int] = None,
+        head_dim: Optional[int] = None,
+        bias: bool = True,
+        causal: bool = True,
+        softmax_scale: Optional[float] = None,
+        layer_idx: Optional[int] = None,
+        return_residual: bool = False,
+        checkpointing: bool = False,
+    ) -> None:
+        super().__init__()
+
+        # Rotary embedding
+        self.rotary_dim = rotary_dim if rotary_dim is not None else getattr(config, "rotary_dim", 0)
+        if self.rotary_dim > 0:
+            rotary_cls = FlashRotaryEmbedding if config.flash_rotary else RotaryEmbedding
+            if rotary_cls is None:
+                rotary_cls = RotaryEmbedding
+
+            rotary_kwargs = {}
+            if rotary_cls is RotaryEmbedding:
+                rotary_kwargs["max_position_embeddings"] = config.n_positions
+
+            self.rotary_emb = rotary_cls(
+                self.rotary_dim,
+                base=rotary_base,
+                scale_base=rotary_scale_base,
+                device=device,
+                **rotary_kwargs,
+            )
+
+        # Attention head dimensions and the QKV/output projections
+        self.n_head, self.n_head_kv, self.head_dim = _find_mha_dims(
+            config, n_head=n_head, n_head_kv=n_head_kv, head_dim=head_dim
+        )
+        op_size = self.head_dim * (self.n_head + 2 * self.n_head_kv)
+        hidden_size = config.n_embd
+
+        linear_cls = FusedDense if config.fused_dense else nn.Linear
+        if linear_cls is None:
+            linear_cls = nn.Linear
+
+        self.Wqkv = linear_cls(hidden_size, op_size, bias=bias, device=device, dtype=dtype)
+        self.out_proj = linear_cls(hidden_size, hidden_size, bias=bias, device=device, dtype=dtype)
+
+        # Attention
+        attn_cls = FlashSelfAttention if config.flash_attn else SelfAttention
+        if attn_cls is None:
+            attn_cls = SelfAttention
+
+        cross_attn_cls = FlashCrossAttention if config.flash_attn else CrossAttention
+        if cross_attn_cls is None:
+            cross_attn_cls = CrossAttention
+
+        self.inner_attn = attn_cls(
+            causal=causal,
+            softmax_scale=softmax_scale,
+            attention_dropout=config.attn_pdrop,
+        )
+        self.inner_cross_attn = cross_attn_cls(
+            causal=causal,
+            softmax_scale=softmax_scale,
+            attention_dropout=config.attn_pdrop,
+        )
+
+        self.flash_attn = config.flash_attn and attn_cls is FlashSelfAttention
+        self.layer_idx = layer_idx
+        self.return_residual = return_residual
+        self.checkpointing = checkpointing
+
+    def _forward_self_attn(
+        self, x: torch.FloatTensor, key_padding_mask: Optional[torch.BoolTensor]
+    ) -> torch.FloatTensor:
+        qkv = self.Wqkv(x)
+        qkv = rearrange(qkv, "... (three h d) -> ...
three h d", three=3, d=self.head_dim) + + if self.rotary_dim > 0: + qkv = self.rotary_emb(qkv) + + if self.flash_attn: + batch_size, seqlen = qkv.shape[0], qkv.shape[1] + + cu_seqlens, max_seqlen = None, None + if key_padding_mask is not None: + # If `key_padding_mask` is supplied, we need to unpad the input and retrieve + # the `cu_seqlens` and `max_seqlen` to be used by `flash-attn` + qkv, indices, cu_seqlens, max_seqlen = unpad_input(qkv, key_padding_mask) + + if self.checkpointing: + attn_output = torch.utils.checkpoint.checkpoint( + self.inner_attn, qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen + ) + else: + attn_output = self.inner_attn(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen).to(qkv.device) + + # If `key_padding_mask` is supplied, we need to pad the output back to the original shape + return pad_input(attn_output, indices, batch_size, seqlen) if key_padding_mask is not None else attn_output + + if self.checkpointing: + return torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, key_padding_mask=key_padding_mask) + + return self.inner_attn(qkv, key_padding_mask=key_padding_mask) + + def _forward_cross_attn( + self, + x: torch.FloatTensor, + past_key_values: Optional[InferenceParams], + key_padding_mask: Optional[torch.BoolTensor], + ) -> torch.FloatTensor: + batch_size = x.shape[0] + + qkv = self.Wqkv(x) + + q = qkv[..., : self.n_head * self.head_dim] + q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim) + + kv = qkv[..., self.n_head * self.head_dim :] + kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim) + + seqlen_offset = past_key_values.seqlen_offset if past_key_values is not None else 0 + causal = None if seqlen_offset == 0 else False + if self.rotary_dim > 0: + q, kv = self.rotary_emb(q, kv=kv, seqlen_offset=seqlen_offset) + + if past_key_values is not None: + kv = _update_kv_cache(kv, past_key_values, self.layer_idx) + + if self.flash_attn: + batch_size, seqlen_q = q.shape[0], q.shape[1] + seqlen_k = kv.shape[1] + + cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k = ( + None, + None, + None, + None, + ) + if key_padding_mask is not None: + kv, _, cu_seqlens_k, max_seqlen_k = unpad_input(kv, key_padding_mask) + + if seqlen_q == 1: + key_padding_mask = torch.ones(batch_size, 1, device=q.device) + elif seqlen_q != seqlen_k: + key_padding_mask = key_padding_mask[:, -seqlen_q:] + + q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, key_padding_mask) + + if self.checkpointing: + attn_output = torch.utils.checkpoint.checkpoint( + self.inner_cross_attn, + q, + kv, + causal=causal, + cu_seqlens=cu_seqlens_q, + max_seqlen=max_seqlen_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_k=max_seqlen_k, + ) + else: + attn_output = self.inner_cross_attn( + q, + kv, + causal=causal, + cu_seqlens=cu_seqlens_q, + max_seqlen=max_seqlen_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_k=max_seqlen_k, + ) + + return ( + pad_input(attn_output, indices_q, batch_size, max_seqlen_q) + if key_padding_mask is not None + else attn_output + ) + + if self.checkpointing: + return torch.utils.checkpoint.checkpoint( + self.inner_cross_attn, + q, + kv, + key_padding_mask=key_padding_mask, + causal=causal, + ) + + return self.inner_cross_attn(q, kv, key_padding_mask=key_padding_mask, causal=causal) + + def forward( + self, + x: torch.FloatTensor, + past_key_values: Optional[InferenceParams] = None, + attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None, + **kwargs, + ) -> Tuple[torch.FloatTensor, torch.FloatTensor]: + if attention_mask is 
not None: + attention_mask = attention_mask.bool() + else: + attention_mask = None + + # MHA + if self.n_head == self.n_head_kv: + if past_key_values is None: + # If `past_key_values` are not supplied, we run self-attention + attn_output = self._forward_self_attn(x, attention_mask) + else: + # If `past_key_values` are supplied, it means that we might have cached values and + # could take advantage of cross-attention + attn_output = self._forward_cross_attn(x, past_key_values, attention_mask) + # MQA / GQA + else: + # Regardless of `past_key_values` being supplied or not, it always use cross-attention + # because `q` and `kv` lengths might be different + attn_output = self._forward_cross_attn(x, past_key_values, attention_mask) + + output = rearrange(attn_output, "... h d -> ... (h d)") + output = self.out_proj(output) + + return output if not self.return_residual else (output, x) + + +class ParallelBlock(nn.Module): + """Parallel block. + + This block applies parallel mixer and MLP layers to the input (used in GPT-J and CodeGen). + + """ + + def __init__( + self, + config: PretrainedConfig, + block_idx: Optional[int] = None, + ) -> None: + super().__init__() + + self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + self.block_idx = block_idx + + self.mixer = MHA(config, layer_idx=block_idx) + self.mlp = MLP(config) + + def forward( + self, + hidden_states: torch.FloatTensor, + past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, + attention_mask: Optional[torch.BoolTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + residual = hidden_states + hidden_states = self.ln(hidden_states) + + attn_outputs = self.mixer( + hidden_states, + past_key_values=past_key_values, + attention_mask=attention_mask, + ) + if isinstance(attn_outputs, tuple): + attn_outputs = attn_outputs[0] + + attn_outputs = self.resid_dropout(attn_outputs) + feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states)) + + hidden_states = attn_outputs + feed_forward_hidden_states + residual + + return hidden_states + + +class CausalLMHead(nn.Module): + """Causal Language Modeling head. + + Reference: + Improving Language Understanding by Generative Pre-Training. + https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf. + + """ + + def __init__(self, config: PretrainedConfig) -> None: + super().__init__() + + self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) + self.linear = nn.Linear(config.n_embd, config.vocab_size) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = self.ln(hidden_states) + logits = self.linear(hidden_states).to(torch.float32) + + return logits + + +class CausalLMLoss(nn.Module): + """Causal Language Modeling loss. + + Reference: + Improving Language Understanding by Generative Pre-Training. + https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf. 
+ + """ + + def __init__(self, shift_labels: bool = True) -> None: + super().__init__() + + self.shift_labels = shift_labels + self.loss_fct = nn.CrossEntropyLoss() + + def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor) -> torch.FloatTensor: + if self.shift_labels: + logits = logits[..., :-1, :].contiguous() + labels = labels[..., 1:].contiguous() + + loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) + + return loss + + +class PhiPreTrainedModel(PreTrainedModel): + """Phi pre-trained model.""" + + config_class = PhiConfig + base_model_prefix = "transformer" + supports_gradient_checkpointing = False + _no_split_modules = ["ParallelBlock"] + + def __init__(self, *inputs, **kwargs) -> None: + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module: nn.Module) -> None: + if isinstance(module, (nn.Linear,)): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + if module.bias is not None: + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + # def prepare_inputs_for_generation( + # self, + # input_ids: torch.LongTensor, + # past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, + # attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None, + # **kwargs, + # ) -> Dict[str, Any]: + # if past_key_values is None or not (isinstance(past_key_values, InferenceParams)): + # past_key_values = InferenceParams( + # max_seqlen=self.config.n_positions, + # max_batch_size=input_ids.shape[0], + # seqlen_offset=0, + # batch_size_offset=0, + # key_value_memory_dict={}, + # lengths_per_sample=None, + # ) + # else: + # # Assume that `past_key_values` has cached all tokens up to the last token in `input_ids` + # past_key_values.seqlen_offset = input_ids.shape[1] - 1 + # input_ids = input_ids[:, -1].unsqueeze(-1) + + # return { + # "input_ids": input_ids, + # "past_key_values": past_key_values, + # "attention_mask": attention_mask, + # } + + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor = None, # Make `input_ids` optional. + past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, + attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, # Add `inputs_embeds` argument. + **kwargs, + ) -> Dict[str, Any]: + if past_key_values is None or not (isinstance(past_key_values, InferenceParams)): + past_key_values = InferenceParams( + max_seqlen=self.config.n_positions, + max_batch_size=(input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]), + seqlen_offset=0, + batch_size_offset=0, + key_value_memory_dict={}, + lengths_per_sample=None, + ) + else: + if input_ids is not None: + past_key_values.seqlen_offset = input_ids.shape[1] - 1 + input_ids = input_ids[:, -1].unsqueeze(-1) + elif inputs_embeds is not None: + past_key_values.seqlen_offset = inputs_embeds.shape[1] - 1 + inputs_embeds = inputs_embeds if past_key_values.seqlen_offset == 0 else None + + return { + "input_ids": input_ids, + "past_key_values": past_key_values, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, # Add `inputs_embeds` to the returned dict. 
+ } + + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation + # def prepare_inputs_for_generation( + # self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + # ): + # if past_key_values is not None: + # if isinstance(past_key_values, Cache): + # cache_length = past_key_values.get_seq_length() + # past_length = past_key_values.seen_tokens + # max_cache_length = past_key_values.get_max_length() + # else: + # cache_length = past_length = past_key_values[0][0].shape[2] + # max_cache_length = None + + # # Keep only the unprocessed tokens: + # # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # # some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as + # # input) + # if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + # input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # # input_ids based on the past_length. + # elif past_length < input_ids.shape[1]: + # input_ids = input_ids[:, past_length:] + # # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + # if ( + # max_cache_length is not None + # and attention_mask is not None + # and cache_length + input_ids.shape[1] > max_cache_length + # ): + # attention_mask = attention_mask[:, -max_cache_length:] + + # position_ids = kwargs.get("position_ids", None) + # if attention_mask is not None and position_ids is None: + # # create position_ids on the fly for batch generation + # position_ids = attention_mask.long().cumsum(-1) - 1 + # position_ids.masked_fill_(attention_mask == 0, 1) + # if past_key_values: + # position_ids = position_ids[:, -input_ids.shape[1] :] + + # # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + # if inputs_embeds is not None and past_key_values is None: + # model_inputs = {"inputs_embeds": inputs_embeds} + # else: + # model_inputs = {"input_ids": input_ids} + + # model_inputs.update( + # { + # "position_ids": position_ids, + # "past_key_values": past_key_values, + # "use_cache": kwargs.get("use_cache"), + # "attention_mask": attention_mask, + # } + # ) + # return model_inputs + + +class PhiModel(PhiPreTrainedModel): + """Phi model.""" + + _keys_to_ignore_on_load_missing = [""] + _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"] + + def __init__(self, config: PhiConfig) -> None: + super().__init__(config) + + self.embd = Embedding(config) + self.h = nn.ModuleList([ParallelBlock(config, block_idx=i) for i in range(config.n_layer)]) + self.gradient_checkpointing = False + self.post_init() + + def get_input_embeddings(self) -> nn.Embedding: + return self.embd.wte + + def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None: + self.embd.wte = new_embeddings + + def forward( + self, + input_ids: torch.LongTensor = None, + past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, + attention_mask: Optional[torch.BoolTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + if inputs_embeds is None: + hidden_states = self.embd(input_ids) + elif inputs_embeds is not None: + hidden_states = 
inputs_embeds
+
+        for layer in self.h:
+            hidden_states = layer(
+                hidden_states,
+                past_key_values=past_key_values,
+                attention_mask=attention_mask,
+            )
+
+        return hidden_states
+
+
+class PhiForCausalLM(PhiPreTrainedModel):
+    """Phi for Causal Language Modeling."""
+
+    _keys_to_ignore_on_load_missing = [""]
+    _keys_to_ignore_on_load_unexpected = [r"transformer\.h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]
+
+    def __init__(self, config: PhiConfig) -> None:
+        super().__init__(config)
+
+        self.transformer = PhiModel(config)
+        self.lm_head = CausalLMHead(config)
+        self.loss = CausalLMLoss()
+
+        self.post_init()
+
+    def get_output_embeddings(self) -> nn.Linear:
+        return self.lm_head.linear
+
+    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
+        self.lm_head.linear = new_embeddings
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
+        attention_mask: Optional[torch.BoolTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        **kwargs,
+    ) -> CausalLMOutputWithPast:
+        hidden_states = self.transformer(
+            input_ids,
+            inputs_embeds=inputs_embeds,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+        )
+        lm_logits = self.lm_head(hidden_states)
+
+        loss = None
+        if labels is not None:
+            loss = self.loss(lm_logits, labels)
+
+        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=past_key_values)
diff --git a/minigpt4/models/text.py b/minigpt4/models/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..48fa1bd2f976d79304e39c59c9f5f2da74c99665
--- /dev/null
+++ b/minigpt4/models/text.py
@@ -0,0 +1,13 @@
+import torch
+from transformers import PhiForCausalLM
+from transformers import AutoTokenizer
+
+torch.set_default_device("cuda")
+model = PhiForCausalLM.from_pretrained("/root/autodl-tmp/phi-2", torch_dtype="auto", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("/root/autodl-tmp/phi-2", trust_remote_code=True)
+inputs = tokenizer('Hello? How are you?', return_tensors="pt", return_attention_mask=False)
+print(inputs)
+embeddings = model.get_input_embeddings()(inputs.input_ids)  # token embeddings, printed for inspection
+outputs = model.generate(**inputs, max_length=200)
+text = tokenizer.batch_decode(outputs)[0]
+print(text)
diff --git a/minigpt4/processors/__init__.py b/minigpt4/processors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e560eaa15f3266dbc1ffbca70bdc791901737a60
--- /dev/null
+++ b/minigpt4/processors/__init__.py
@@ -0,0 +1,33 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from minigpt4.processors.base_processor import BaseProcessor +from minigpt4.processors.blip_processors import ( + Blip2ImageTrainProcessor, + Blip2ImageEvalProcessor, + BlipCaptionProcessor, +) + +from minigpt4.common.registry import registry + +__all__ = [ + "BaseProcessor", + "Blip2ImageTrainProcessor", + "Blip2ImageEvalProcessor", + "BlipCaptionProcessor", +] + + +def load_processor(name, cfg=None): + """ + Example + + >>> processor = load_processor("alpro_video_train", cfg=None) + """ + processor = registry.get_processor_class(name).from_config(cfg) + + return processor diff --git a/minigpt4/processors/__pycache__/__init__.cpython-39.pyc b/minigpt4/processors/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..048b2bb85f368f18ca906bb212a20db71c058920 Binary files /dev/null and b/minigpt4/processors/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/processors/__pycache__/base_processor.cpython-39.pyc b/minigpt4/processors/__pycache__/base_processor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1af4f993a9cc839f0cec7421f0e8cd62d88e6921 Binary files /dev/null and b/minigpt4/processors/__pycache__/base_processor.cpython-39.pyc differ diff --git a/minigpt4/processors/__pycache__/blip_processors.cpython-39.pyc b/minigpt4/processors/__pycache__/blip_processors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2461f0abd1471433f3572ed7721fb8ce4ba01b7 Binary files /dev/null and b/minigpt4/processors/__pycache__/blip_processors.cpython-39.pyc differ diff --git a/minigpt4/processors/__pycache__/randaugment.cpython-39.pyc b/minigpt4/processors/__pycache__/randaugment.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7cd9e5a615602dd4d4bffead6bcf0dfdb1703f5 Binary files /dev/null and b/minigpt4/processors/__pycache__/randaugment.cpython-39.pyc differ diff --git a/minigpt4/processors/base_processor.py b/minigpt4/processors/base_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..39b33cdf8fcd97cfd3e4a5fbece6593357af9d41 --- /dev/null +++ b/minigpt4/processors/base_processor.py @@ -0,0 +1,26 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from omegaconf import OmegaConf + + +class BaseProcessor: + def __init__(self): + self.transform = lambda x: x + return + + def __call__(self, item): + return self.transform(item) + + @classmethod + def from_config(cls, cfg=None): + return cls() + + def build(self, **kwargs): + cfg = OmegaConf.create(kwargs) + + return self.from_config(cfg) diff --git a/minigpt4/processors/blip_processors.py b/minigpt4/processors/blip_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..ee3f694731dd131e783818ea7d66c9f6c027fb18 --- /dev/null +++ b/minigpt4/processors/blip_processors.py @@ -0,0 +1,140 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import re + +from minigpt4.common.registry import registry +from minigpt4.processors.base_processor import BaseProcessor +from minigpt4.processors.randaugment import RandomAugment +from omegaconf import OmegaConf +from torchvision import transforms +from torchvision.transforms.functional import InterpolationMode + + +class BlipImageBaseProcessor(BaseProcessor): + def __init__(self, mean=None, std=None): + if mean is None: + mean = (0.48145466, 0.4578275, 0.40821073) + if std is None: + std = (0.26862954, 0.26130258, 0.27577711) + + self.normalize = transforms.Normalize(mean, std) + + +@registry.register_processor("blip_caption") +class BlipCaptionProcessor(BaseProcessor): + def __init__(self, prompt="", max_words=50): + self.prompt = prompt + self.max_words = max_words + + def __call__(self, caption): + caption = self.prompt + self.pre_caption(caption) + + return caption + + @classmethod + def from_config(cls, cfg=None): + if cfg is None: + cfg = OmegaConf.create() + + prompt = cfg.get("prompt", "") + max_words = cfg.get("max_words", 50) + + return cls(prompt=prompt, max_words=max_words) + + def pre_caption(self, caption): + caption = re.sub( + r"([.!\"()*#:;~])", + " ", + caption.lower(), + ) + caption = re.sub( + r"\s{2,}", + " ", + caption, + ) + caption = caption.rstrip("\n") + caption = caption.strip(" ") + + # truncate caption + caption_words = caption.split(" ") + if len(caption_words) > self.max_words: + caption = " ".join(caption_words[: self.max_words]) + + return caption + + +@registry.register_processor("blip2_image_train") +class Blip2ImageTrainProcessor(BlipImageBaseProcessor): + def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0): + super().__init__(mean=mean, std=std) + + self.transform = transforms.Compose( + [ + transforms.Resize( + (image_size,image_size), + interpolation=InterpolationMode.BICUBIC, + ), + transforms.ToTensor(), + self.normalize, + ] + ) + + def __call__(self, item): + return self.transform(item) + + @classmethod + def from_config(cls, cfg=None): + if cfg is None: + cfg = OmegaConf.create() + + image_size = cfg.get("image_size", 224) + + mean = cfg.get("mean", None) + std = cfg.get("std", None) + + min_scale = cfg.get("min_scale", 0.5) + max_scale = cfg.get("max_scale", 1.0) + + return cls( + image_size=image_size, + mean=mean, + std=std, + min_scale=min_scale, + max_scale=max_scale, + ) + + +@registry.register_processor("blip2_image_eval") +class Blip2ImageEvalProcessor(BlipImageBaseProcessor): + def __init__(self, image_size=224, mean=None, std=None): + super().__init__(mean=mean, std=std) + + self.transform = transforms.Compose( + [ + transforms.Resize( + (image_size, image_size), interpolation=InterpolationMode.BICUBIC + ), + transforms.ToTensor(), + self.normalize, + ] + ) + + def __call__(self, item): + return self.transform(item) + + @classmethod + def from_config(cls, cfg=None): + if cfg is None: + cfg = OmegaConf.create() + + image_size = cfg.get("image_size", 224) + + mean = cfg.get("mean", None) + std = cfg.get("std", None) + + return cls(image_size=image_size, mean=mean, std=std) diff --git a/minigpt4/processors/randaugment.py b/minigpt4/processors/randaugment.py new file mode 100644 index 0000000000000000000000000000000000000000..7034a49ad5fc63b97910790017432617ff4c6d7b --- /dev/null +++ b/minigpt4/processors/randaugment.py @@ -0,0 +1,398 @@ +""" 
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import cv2
+import numpy as np
+
+import torch
+
+
+## aug functions
+def identity_func(img):
+    return img
+
+
+def autocontrast_func(img, cutoff=0):
+    """
+    same output as PIL.ImageOps.autocontrast
+    """
+    n_bins = 256
+
+    def tune_channel(ch):
+        n = ch.size
+        cut = cutoff * n // 100
+        if cut == 0:
+            high, low = ch.max(), ch.min()
+        else:
+            hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
+            low = np.argwhere(np.cumsum(hist) > cut)
+            low = 0 if low.shape[0] == 0 else low[0]
+            high = np.argwhere(np.cumsum(hist[::-1]) > cut)
+            high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
+        if high <= low:
+            table = np.arange(n_bins)
+        else:
+            scale = (n_bins - 1) / (high - low)
+            offset = -low * scale
+            table = np.arange(n_bins) * scale + offset
+            table[table < 0] = 0
+            table[table > n_bins - 1] = n_bins - 1
+        table = table.clip(0, 255).astype(np.uint8)
+        return table[ch]
+
+    channels = [tune_channel(ch) for ch in cv2.split(img)]
+    out = cv2.merge(channels)
+    return out
+
+
+def equalize_func(img):
+    """
+    same output as PIL.ImageOps.equalize
+    PIL's implementation is different from cv2.equalize
+    """
+    n_bins = 256
+
+    def tune_channel(ch):
+        hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
+        non_zero_hist = hist[hist != 0].reshape(-1)
+        step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
+        if step == 0:
+            return ch
+        n = np.empty_like(hist)
+        n[0] = step // 2
+        n[1:] = hist[:-1]
+        table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
+        return table[ch]
+
+    channels = [tune_channel(ch) for ch in cv2.split(img)]
+    out = cv2.merge(channels)
+    return out
+
+
+def rotate_func(img, degree, fill=(0, 0, 0)):
+    """
+    like PIL, rotate by degree, not radians
+    """
+    H, W = img.shape[0], img.shape[1]
+    center = W / 2, H / 2
+    M = cv2.getRotationMatrix2D(center, degree, 1)
+    out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
+    return out
+
+
+def solarize_func(img, thresh=128):
+    """
+    same output as PIL.ImageOps.solarize
+    """
+    table = np.array([el if el < thresh else 255 - el for el in range(256)])
+    table = table.clip(0, 255).astype(np.uint8)
+    out = table[img]
+    return out
+
+
+def color_func(img, factor):
+    """
+    same output as PIL.ImageEnhance.Color
+    """
+    ## implementation according to PIL definition, quite slow
+    # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
+    # out = blend(degenerate, img, factor)
+    # M = (
+    #     np.eye(3) * factor
+    #     + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
+    # )[np.newaxis, np.newaxis, :]
+    M = np.float32(
+        [[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]
+    ) * factor + np.float32([[0.114], [0.587], [0.299]])
+    out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
+    return out
+
+
+def contrast_func(img, factor):
+    """
+    same output as PIL.ImageEnhance.Contrast
+    """
+    mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
+    table = (
+        np.array([(el - mean) * factor + mean for el in range(256)])
+        .clip(0, 255)
+        .astype(np.uint8)
+    )
+    out = table[img]
+    return out
+
+
+def brightness_func(img, factor):
+    """
+    same output as PIL.ImageEnhance.Brightness
+    """
+    table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
+    out = table[img]
+    return out
+
+
+def sharpness_func(img, factor):
+    """
+    The differences between this result and PIL's are all on the 4 boundaries;
+    the center areas are the same
+    """
+    kernel = np.ones((3, 3), dtype=np.float32)
+    kernel[1][1] = 5
+    kernel /= 13
+    degenerate = cv2.filter2D(img, -1, kernel)
+    if factor == 0.0:
+        out = degenerate
+    elif factor == 1.0:
+        out = img
+    else:
+        out = img.astype(np.float32)
+        degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
+        out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
+        out = out.astype(np.uint8)
+    return out
+
+
+def shear_x_func(img, factor, fill=(0, 0, 0)):
+    H, W = img.shape[0], img.shape[1]
+    M = np.float32([[1, factor, 0], [0, 1, 0]])
+    out = cv2.warpAffine(
+        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
+    ).astype(np.uint8)
+    return out
+
+
+def translate_x_func(img, offset, fill=(0, 0, 0)):
+    """
+    same output as PIL.Image.transform
+    """
+    H, W = img.shape[0], img.shape[1]
+    M = np.float32([[1, 0, -offset], [0, 1, 0]])
+    out = cv2.warpAffine(
+        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
+    ).astype(np.uint8)
+    return out
+
+
+def translate_y_func(img, offset, fill=(0, 0, 0)):
+    """
+    same output as PIL.Image.transform
+    """
+    H, W = img.shape[0], img.shape[1]
+    M = np.float32([[1, 0, 0], [0, 1, -offset]])
+    out = cv2.warpAffine(
+        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
+    ).astype(np.uint8)
+    return out
+
+
+def posterize_func(img, bits):
+    """
+    same output as PIL.ImageOps.posterize
+    """
+    out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
+    return out
+
+
+def shear_y_func(img, factor, fill=(0, 0, 0)):
+    H, W = img.shape[0], img.shape[1]
+    M = np.float32([[1, 0, 0], [factor, 1, 0]])
+    out = cv2.warpAffine(
+        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
+    ).astype(np.uint8)
+    return out
+
+
+def cutout_func(img, pad_size, replace=(0, 0, 0)):
+    replace = np.array(replace, dtype=np.uint8)
+    H, W = img.shape[0], img.shape[1]
+    rh, rw = np.random.random(2)
+    pad_size = pad_size // 2
+    ch, cw = int(rh * H), int(rw * W)
+    x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
+    y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
+    out = img.copy()
+    out[x1:x2, y1:y2, :] = replace
+    return out
+
+
+### level to args
+def enhance_level_to_args(MAX_LEVEL):
+    def level_to_args(level):
+        return ((level / MAX_LEVEL) * 1.8 + 0.1,)
+
+    return level_to_args
+
+
+def shear_level_to_args(MAX_LEVEL, replace_value):
+    def level_to_args(level):
+        level = (level / MAX_LEVEL) * 0.3
+        if np.random.random() > 0.5:
+            level = -level
+        return (level, replace_value)
+
+    return level_to_args
+
+
+def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
+    def level_to_args(level):
+        level = (level / MAX_LEVEL) *
float(translate_const) + if np.random.random() > 0.5: + level = -level + return (level, replace_value) + + return level_to_args + + +def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value): + def level_to_args(level): + level = int((level / MAX_LEVEL) * cutout_const) + return (level, replace_value) + + return level_to_args + + +def solarize_level_to_args(MAX_LEVEL): + def level_to_args(level): + level = int((level / MAX_LEVEL) * 256) + return (level,) + + return level_to_args + + +def none_level_to_args(level): + return () + + +def posterize_level_to_args(MAX_LEVEL): + def level_to_args(level): + level = int((level / MAX_LEVEL) * 4) + return (level,) + + return level_to_args + + +def rotate_level_to_args(MAX_LEVEL, replace_value): + def level_to_args(level): + level = (level / MAX_LEVEL) * 30 + if np.random.random() < 0.5: + level = -level + return (level, replace_value) + + return level_to_args + + +func_dict = { + "Identity": identity_func, + "AutoContrast": autocontrast_func, + "Equalize": equalize_func, + "Rotate": rotate_func, + "Solarize": solarize_func, + "Color": color_func, + "Contrast": contrast_func, + "Brightness": brightness_func, + "Sharpness": sharpness_func, + "ShearX": shear_x_func, + "TranslateX": translate_x_func, + "TranslateY": translate_y_func, + "Posterize": posterize_func, + "ShearY": shear_y_func, +} + +translate_const = 10 +MAX_LEVEL = 10 +replace_value = (128, 128, 128) +arg_dict = { + "Identity": none_level_to_args, + "AutoContrast": none_level_to_args, + "Equalize": none_level_to_args, + "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value), + "Solarize": solarize_level_to_args(MAX_LEVEL), + "Color": enhance_level_to_args(MAX_LEVEL), + "Contrast": enhance_level_to_args(MAX_LEVEL), + "Brightness": enhance_level_to_args(MAX_LEVEL), + "Sharpness": enhance_level_to_args(MAX_LEVEL), + "ShearX": shear_level_to_args(MAX_LEVEL, replace_value), + "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), + "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), + "Posterize": posterize_level_to_args(MAX_LEVEL), + "ShearY": shear_level_to_args(MAX_LEVEL, replace_value), +} + + +class RandomAugment(object): + def __init__(self, N=2, M=10, isPIL=False, augs=[]): + self.N = N + self.M = M + self.isPIL = isPIL + if augs: + self.augs = augs + else: + self.augs = list(arg_dict.keys()) + + def get_random_ops(self): + sampled_ops = np.random.choice(self.augs, self.N) + return [(op, 0.5, self.M) for op in sampled_ops] + + def __call__(self, img): + if self.isPIL: + img = np.array(img) + ops = self.get_random_ops() + for name, prob, level in ops: + if np.random.random() > prob: + continue + args = arg_dict[name](level) + img = func_dict[name](img, *args) + return img + + +class VideoRandomAugment(object): + def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]): + self.N = N + self.M = M + self.p = p + self.tensor_in_tensor_out = tensor_in_tensor_out + if augs: + self.augs = augs + else: + self.augs = list(arg_dict.keys()) + + def get_random_ops(self): + sampled_ops = np.random.choice(self.augs, self.N, replace=False) + return [(op, self.M) for op in sampled_ops] + + def __call__(self, frames): + assert ( + frames.shape[-1] == 3 + ), "Expecting last dimension for 3-channels RGB (b, h, w, c)." 
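+        # The ops in func_dict are numpy/cv2 based, so tensor input is first
+        # converted to uint8 numpy frames before augmenting.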
+ + if self.tensor_in_tensor_out: + frames = frames.numpy().astype(np.uint8) + + num_frames = frames.shape[0] + + ops = num_frames * [self.get_random_ops()] + apply_or_not = num_frames * [np.random.random(size=self.N) > self.p] + + frames = torch.stack( + list(map(self._aug, frames, ops, apply_or_not)), dim=0 + ).float() + + return frames + + def _aug(self, img, ops, apply_or_not): + for i, (name, level) in enumerate(ops): + if not apply_or_not[i]: + continue + args = arg_dict[name](level) + img = func_dict[name](img, *args) + return torch.from_numpy(img) + + +if __name__ == "__main__": + a = RandomAugment() + img = np.random.randn(32, 32, 3) + a(img) diff --git a/minigpt4/runners/__init__.py b/minigpt4/runners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..64e7a4d643a8b5a1714687f42d43347a94b72373 --- /dev/null +++ b/minigpt4/runners/__init__.py @@ -0,0 +1,10 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from minigpt4.runners.runner_base import RunnerBase + +__all__ = ["RunnerBase"] diff --git a/minigpt4/runners/__pycache__/__init__.cpython-39.pyc b/minigpt4/runners/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b97810ce15c78518fc991f9b779fa53a0f6a77bd Binary files /dev/null and b/minigpt4/runners/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/runners/__pycache__/runner_base.cpython-39.pyc b/minigpt4/runners/__pycache__/runner_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d22bb3ac92f369dea44abdbe492ff8ebcb1c37d2 Binary files /dev/null and b/minigpt4/runners/__pycache__/runner_base.cpython-39.pyc differ diff --git a/minigpt4/runners/runner_base.py b/minigpt4/runners/runner_base.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8dc1de2c5546bdc6e1ab2f04d92813c9d023bb --- /dev/null +++ b/minigpt4/runners/runner_base.py @@ -0,0 +1,659 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import datetime +import json +import logging +import os +import time +from pathlib import Path + +import torch +import torch.distributed as dist +import webdataset as wds +from minigpt4.common.dist_utils import ( + download_cached_file, + get_rank, + get_world_size, + is_main_process, + main_process, +) +from minigpt4.common.registry import registry +from minigpt4.common.utils import is_url +from minigpt4.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset +from minigpt4.datasets.datasets.dataloader_utils import ( + IterLoader, + MultiIterLoader, + PrefetchLoader, +) +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.data import DataLoader, DistributedSampler + + +@registry.register_runner("runner_base") +class RunnerBase: + """ + A runner class to train and evaluate a model given a task and datasets. + + The runner uses pytorch distributed data parallel by default. Future release + will support other distributed frameworks. 
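+
+    The typical entry point is train(), which runs the epoch loop with optional
+    validation, checkpointing, and a final test pass.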
+ """ + + def __init__(self, cfg, task, model, datasets, job_id): + self.config = cfg + self.job_id = job_id + + self.task = task + self.datasets = datasets + + self._model = model + + self._wrapped_model = None + self._device = None + self._optimizer = None + self._scaler = None + self._dataloaders = None + self._lr_sched = None + + self.start_epoch = 0 + + # self.setup_seeds() + self.setup_output_dir() + + @property + def device(self): + if self._device is None: + self._device = torch.device(self.config.run_cfg.device) + + return self._device + + @property + def use_distributed(self): + return self.config.run_cfg.distributed + + @property + def model(self): + """ + A property to get the DDP-wrapped model on the device. + """ + # move model to device + if self._model.device != self.device: + self._model = self._model.to(self.device) + + # distributed training wrapper + if self.use_distributed: + if self._wrapped_model is None: + self._wrapped_model = DDP( + self._model, device_ids=[self.config.run_cfg.gpu], find_unused_parameters=True + ) + else: + self._wrapped_model = self._model + + return self._wrapped_model + + @property + def optimizer(self): + # TODO make optimizer class and configurations + if self._optimizer is None: + num_parameters = 0 + p_wd, p_non_wd = [], [] + for n, p in self.model.named_parameters(): + if not p.requires_grad: + continue # frozen weights + print(n) + if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: + p_non_wd.append(p) + else: + p_wd.append(p) + num_parameters += p.data.nelement() + logging.info("number of trainable parameters: %d" % num_parameters) + optim_params = [ + { + "params": p_wd, + "weight_decay": float(self.config.run_cfg.weight_decay), + }, + {"params": p_non_wd, "weight_decay": 0}, + ] + beta2 = self.config.run_cfg.get("beta2", 0.999) + self._optimizer = torch.optim.AdamW( + optim_params, + lr=float(self.config.run_cfg.init_lr), + weight_decay=float(self.config.run_cfg.weight_decay), + betas=(0.9, beta2), + ) + + return self._optimizer + + @property + def scaler(self): + amp = self.config.run_cfg.get("amp", False) + + if amp: + if self._scaler is None: + self._scaler = torch.cuda.amp.GradScaler() + + return self._scaler + + @property + def lr_scheduler(self): + """ + A property to get and create learning rate scheduler by split just in need. + """ + if self._lr_sched is None: + lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) + + # max_epoch = self.config.run_cfg.max_epoch + max_epoch = self.max_epoch + # min_lr = self.config.run_cfg.min_lr + min_lr = self.min_lr + # init_lr = self.config.run_cfg.init_lr + init_lr = self.init_lr + + # optional parameters + decay_rate = self.config.run_cfg.get("lr_decay_rate", None) + warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) + warmup_steps = self.config.run_cfg.get("warmup_steps", 0) + iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None) + + if iters_per_epoch is None: + try: + iters_per_epoch = len(self.dataloaders['train']) + except (AttributeError, TypeError): + iters_per_epoch = 10000 + + self._lr_sched = lr_sched_cls( + optimizer=self.optimizer, + max_epoch=max_epoch, + iters_per_epoch=iters_per_epoch, + min_lr=min_lr, + init_lr=init_lr, + decay_rate=decay_rate, + warmup_start_lr=warmup_start_lr, + warmup_steps=warmup_steps, + ) + + return self._lr_sched + + @property + def dataloaders(self) -> dict: + """ + A property to get and create dataloaders by split just in need. 
+
+        If no train_dataset_ratio is provided, concatenate map-style datasets and
+        chain wds.DataPipe datasets separately. The training set becomes a tuple
+        (ConcatDataset, ChainDataset); both are optional, but at least one of them is
+        required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
+
+        If train_dataset_ratio is provided, create a MultiIterLoader to sample
+        each dataset by ratios during training.
+
+        Multiple datasets are currently not supported for the validation and test splits.
+
+        Returns:
+            dict: {split_name: (tuples of) dataloader}
+        """
+        if self._dataloaders is None:
+
+            # concatenate map-style datasets and chain wds.DataPipe datasets separately
+            # training set becomes a tuple (ConcatDataset, ChainDataset), both are
+            # optional but at least one of them is required. The resultant ConcatDataset
+            # and ChainDataset will be sampled evenly.
+            logging.info(
+                "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
+            )
+
+            batch_sizes = {dataset_name: getattr(self.config.datasets_cfg, dataset_name).batch_size
+                           for dataset_name in self.datasets.keys()}
+            datasets, batch_sizes = reorg_datasets_by_split(self.datasets, batch_sizes)
+            self.datasets = datasets
+            # self.datasets = concat_datasets(datasets)
+
+            # print dataset statistics after concatenation/chaining
+            for split_name in self.datasets:
+                if isinstance(self.datasets[split_name], tuple) or isinstance(
+                    self.datasets[split_name], list
+                ):
+                    # mixed wds.DataPipeline and torch.utils.data.Dataset
+                    num_records = sum(
+                        [
+                            len(d)
+                            if not type(d) in [wds.DataPipeline, ChainDataset]
+                            else 0
+                            for d in self.datasets[split_name]
+                        ]
+                    )
+
+                else:
+                    if hasattr(self.datasets[split_name], "__len__"):
+                        # a single map-style dataset
+                        num_records = len(self.datasets[split_name])
+                    else:
+                        # a single wds.DataPipeline
+                        num_records = -1
+                        logging.info(
+                            "Only a single wds.DataPipeline dataset, no __len__ attribute."
+ ) + + if num_records >= 0: + logging.info( + "Loaded {} records for {} split from the dataset.".format( + num_records, split_name + ) + ) + + # create dataloaders + split_names = sorted(self.datasets.keys()) + + datasets = [self.datasets[split] for split in split_names] + batch_sizes = [batch_sizes[split] for split in split_names] + is_trains = [split in self.train_splits for split in split_names] + + print("batch sizes", batch_sizes) + + collate_fns = [] + for dataset in datasets: + if isinstance(dataset, tuple) or isinstance(dataset, list): + collate_fns.append([getattr(d, "collater", None) for d in dataset]) + else: + collate_fns.append(getattr(dataset, "collater", None)) + + dataloaders = self.create_loaders( + datasets=datasets, + num_workers=self.config.run_cfg.num_workers, + batch_sizes=batch_sizes, + is_trains=is_trains, + collate_fns=collate_fns, + ) + + self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} + + return self._dataloaders + + @property + def cuda_enabled(self): + return self.device.type == "cuda" + + @property + def max_epoch(self): + return int(self.config.run_cfg.max_epoch) + + @property + def log_freq(self): + log_freq = self.config.run_cfg.get("log_freq", 50) + return int(log_freq) + + @property + def init_lr(self): + return float(self.config.run_cfg.init_lr) + + @property + def min_lr(self): + return float(self.config.run_cfg.min_lr) + + @property + def accum_grad_iters(self): + return int(self.config.run_cfg.get("accum_grad_iters", 1)) + + @property + def valid_splits(self): + valid_splits = self.config.run_cfg.get("valid_splits", []) + + if len(valid_splits) == 0: + logging.info("No validation splits found.") + + return valid_splits + + @property + def test_splits(self): + test_splits = self.config.run_cfg.get("test_splits", []) + + return test_splits + + @property + def train_splits(self): + train_splits = self.config.run_cfg.get("train_splits", []) + + if len(train_splits) == 0: + logging.info("Empty train splits.") + + return train_splits + + @property + def evaluate_only(self): + """ + Set to True to skip training. 
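+        Read from run_cfg.evaluate.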
+ """ + return self.config.run_cfg.evaluate + + @property + def use_dist_eval_sampler(self): + return self.config.run_cfg.get("use_dist_eval_sampler", True) + + @property + def resume_ckpt_path(self): + return self.config.run_cfg.get("resume_ckpt_path", None) + + @property + def train_loader(self): + train_dataloader = self.dataloaders["train"] + + return train_dataloader + + def setup_output_dir(self): + lib_root = Path(registry.get_path("library_root")) + + output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id + # output_dir = lib_root / self.config.run_cfg.output_dir + result_dir = output_dir / "result" + + output_dir.mkdir(parents=True, exist_ok=True) + result_dir.mkdir(parents=True, exist_ok=True) + + registry.register_path("result_dir", str(result_dir)) + registry.register_path("output_dir", str(output_dir)) + + self.result_dir = result_dir + self.output_dir = output_dir + + def train(self): + start_time = time.time() + best_agg_metric = 0 + best_epoch = 0 + + self.log_config() + + # resume from checkpoint if specified + if not self.evaluate_only and self.resume_ckpt_path is not None: + self._load_checkpoint(self.resume_ckpt_path) + + for cur_epoch in range(self.start_epoch, self.max_epoch): + # training phase + if not self.evaluate_only: + logging.info("Start training") + train_stats = self.train_epoch(cur_epoch) + self.log_stats(split_name="train", stats=train_stats) + + # evaluation phase + if len(self.valid_splits) > 0: + for split_name in self.valid_splits: + logging.info("Evaluating on {}.".format(split_name)) + + val_log = self.eval_epoch( + split_name=split_name, cur_epoch=cur_epoch + ) + if val_log is not None: + if is_main_process(): + assert ( + "agg_metrics" in val_log + ), "No agg_metrics found in validation log." + + agg_metrics = val_log["agg_metrics"] + if agg_metrics > best_agg_metric and split_name == "val": + best_epoch, best_agg_metric = cur_epoch, agg_metrics + + self._save_checkpoint(cur_epoch, is_best=True) + + val_log.update({"best_epoch": best_epoch}) + self.log_stats(val_log, split_name) + + else: + # if no validation split is provided, we just save the checkpoint at the end of each epoch. + if not self.evaluate_only: + self._save_checkpoint(cur_epoch, is_best=False) + + if self.evaluate_only: + break + + if self.config.run_cfg.distributed: + dist.barrier() + + # testing phase + test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch + self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logging.info("Training time {}".format(total_time_str)) + + def evaluate(self, cur_epoch="best", skip_reload=False): + test_logs = dict() + + if len(self.test_splits) > 0: + for split_name in self.test_splits: + test_logs[split_name] = self.eval_epoch( + split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload + ) + + return test_logs + + def train_epoch(self, epoch): + # train + self.model.train() + + return self.task.train_epoch( + epoch=epoch, + model=self.model, + data_loader=self.train_loader, + optimizer=self.optimizer, + scaler=self.scaler, + lr_scheduler=self.lr_scheduler, + cuda_enabled=self.cuda_enabled, + log_freq=self.log_freq, + accum_grad_iters=self.accum_grad_iters, + ) + + @torch.no_grad() + def eval_epoch(self, split_name, cur_epoch, skip_reload=False): + """ + Evaluate the model on a given split. + + Args: + split_name (str): name of the split to evaluate on. + cur_epoch (int): current epoch. 
+            skip_reload (bool): whether to skip reloading the best checkpoint.
+                During training, we will reload the best checkpoint for validation.
+                During testing, we will use provided weights and skip reloading the best checkpoint.
+        """
+        data_loader = self.dataloaders.get(split_name, None)
+        assert data_loader, "data_loader for split {} is None.".format(split_name)
+
+        # TODO In validation, you need to compute loss as well as metrics
+        # TODO consider moving to model.before_evaluation()
+        model = self.unwrap_dist_model(self.model)
+        if not skip_reload and cur_epoch == "best":
+            model = self._reload_best_model(model)
+        model.eval()
+
+        self.task.before_evaluation(
+            model=model,
+            dataset=self.datasets[split_name],
+        )
+        results = self.task.evaluation(model, data_loader)
+
+        if results is not None:
+            return self.task.after_evaluation(
+                val_result=results,
+                split_name=split_name,
+                epoch=cur_epoch,
+            )
+
+    def unwrap_dist_model(self, model):
+        if self.use_distributed:
+            return model.module
+        else:
+            return model
+
+    def create_loaders(
+        self,
+        datasets,
+        num_workers,
+        batch_sizes,
+        is_trains,
+        collate_fns,
+        dataset_ratios=None,
+    ):
+        """
+        Create dataloaders for training and validation.
+        """
+
+        def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
+            # create a single dataloader for each split
+            if isinstance(dataset, ChainDataset) or isinstance(
+                dataset, wds.DataPipeline
+            ):
+                # wds.WebDataset instances are chained together
+                # webdataset.DataPipeline has its own sampler and collate_fn
+                loader = iter(
+                    DataLoader(
+                        dataset,
+                        batch_size=bsz,
+                        num_workers=num_workers,
+                        pin_memory=True,
+                    )
+                )
+            else:
+                # map-style datasets are concatenated together
+                # setup distributed sampler
+
+                if self.use_distributed:
+                    sampler = DistributedSampler(
+                        dataset,
+                        shuffle=is_train,
+                        num_replicas=get_world_size(),
+                        rank=get_rank(),
+                    )
+                    if not self.use_dist_eval_sampler:
+                        # e.g. retrieval evaluation
+                        sampler = sampler if is_train else None
+                else:
+                    sampler = None
+
+                loader = DataLoader(
+                    dataset,
+                    batch_size=bsz,
+                    num_workers=num_workers,
+                    pin_memory=True,
+                    sampler=sampler,
+                    shuffle=sampler is None and is_train,
+                    collate_fn=collate_fn,
+                    drop_last=True if is_train else False,
+                )
+                loader = PrefetchLoader(loader)
+
+                if is_train:
+                    loader = IterLoader(loader, use_distributed=self.use_distributed)
+
+            return loader
+
+        loaders = []
+
+        for dataset, bsz, is_train, collate_fn in zip(
+            datasets, batch_sizes, is_trains, collate_fns
+        ):
+            if isinstance(dataset, list) or isinstance(dataset, tuple):
+                if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None:
+                    dataset_ratios = [d.sample_ratio for d in dataset]
+                loader = MultiIterLoader(
+                    loaders=[
+                        _create_loader(d, num_workers, bsz[i], is_train, collate_fn[i])
+                        for i, d in enumerate(dataset)
+                    ],
+                    ratios=dataset_ratios,
+                )
+            else:
+                loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
+
+            loaders.append(loader)
+
+        return loaders
+
+    @main_process
+    def _save_checkpoint(self, cur_epoch, is_best=False):
+        """
+        Save the checkpoint at the current epoch.
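+        Parameters that do not require grad are dropped from the saved
+        state_dict, so frozen weights are not written to disk.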
+ """ + model_no_ddp = self.unwrap_dist_model(self.model) + param_grad_dic = { + k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() + } + state_dict = model_no_ddp.state_dict() + for k in list(state_dict.keys()): + if k in param_grad_dic.keys() and not param_grad_dic[k]: + # delete parameters that do not require gradient + del state_dict[k] + save_obj = { + "model": state_dict, + "optimizer": self.optimizer.state_dict(), + "config": self.config.to_dict(), + "scaler": self.scaler.state_dict() if self.scaler else None, + "epoch": cur_epoch, + } + save_to = os.path.join( + self.output_dir, + "checkpoint_{}.pth".format("best" if is_best else cur_epoch), + ) + logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) + torch.save(save_obj, save_to) + + def _reload_best_model(self, model): + """ + Load the best checkpoint for evaluation. + """ + checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") + + logging.info("Loading checkpoint from {}.".format(checkpoint_path)) + checkpoint = torch.load(checkpoint_path, map_location="cpu") + try: + model.load_state_dict(checkpoint["model"]) + except RuntimeError as e: + logging.warning( + """ + Key mismatch when loading checkpoint. This is expected if only part of the model is saved. + Trying to load the model with strict=False. + """ + ) + model.load_state_dict(checkpoint["model"], strict=False) + return model + + def _load_checkpoint(self, url_or_filename): + """ + Resume from a checkpoint. + """ + if is_url(url_or_filename): + cached_file = download_cached_file( + url_or_filename, check_hash=False, progress=True + ) + checkpoint = torch.load(cached_file, map_location=self.device) + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location=self.device) + else: + raise RuntimeError("checkpoint url or path is invalid") + + state_dict = checkpoint["model"] + message = self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) + + self.optimizer.load_state_dict(checkpoint["optimizer"]) + if self.scaler and "scaler" in checkpoint: + self.scaler.load_state_dict(checkpoint["scaler"]) + + self.start_epoch = checkpoint["epoch"] + 1 + print("resume the checkpoint") + logging.info("Resume checkpoint from {}".format(url_or_filename)) + + @main_process + def log_stats(self, stats, split_name): + if isinstance(stats, dict): + log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} + with open(os.path.join(self.output_dir, "log.txt"), "a") as f: + f.write(json.dumps(log_stats) + "\n") + elif isinstance(stats, list): + pass + + @main_process + def log_config(self): + with open(os.path.join(self.output_dir, "log.txt"), "a") as f: + f.write(json.dumps(self.config.to_dict(), indent=4) + "\n") diff --git a/minigpt4/tasks/__init__.py b/minigpt4/tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ab1fb1c8289535cf9397bb9805c0cba3666ad26f --- /dev/null +++ b/minigpt4/tasks/__init__.py @@ -0,0 +1,26 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from minigpt4.common.registry import registry +from minigpt4.tasks.base_task import BaseTask +from minigpt4.tasks.image_text_pretrain import ImageTextPretrainTask + + +def setup_task(cfg): + assert "task" in cfg.run_cfg, "Task name must be provided." 
+ + task_name = cfg.run_cfg.task + task = registry.get_task_class(task_name).setup_task(cfg=cfg) + assert task is not None, "Task {} not properly registered.".format(task_name) + + return task + + +__all__ = [ + "BaseTask", + "ImageTextPretrainTask", +] diff --git a/minigpt4/tasks/__pycache__/__init__.cpython-39.pyc b/minigpt4/tasks/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..169e92a6d32a610ae2cc00efd4a2c837e2a106cf Binary files /dev/null and b/minigpt4/tasks/__pycache__/__init__.cpython-39.pyc differ diff --git a/minigpt4/tasks/__pycache__/base_task.cpython-39.pyc b/minigpt4/tasks/__pycache__/base_task.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8eb1a1fbca64778365460df0f7741c3ac7072ff Binary files /dev/null and b/minigpt4/tasks/__pycache__/base_task.cpython-39.pyc differ diff --git a/minigpt4/tasks/__pycache__/image_text_pretrain.cpython-39.pyc b/minigpt4/tasks/__pycache__/image_text_pretrain.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a990f8e9f2c752789e3019963f0b5b82e1df5806 Binary files /dev/null and b/minigpt4/tasks/__pycache__/image_text_pretrain.cpython-39.pyc differ diff --git a/minigpt4/tasks/base_task.py b/minigpt4/tasks/base_task.py new file mode 100644 index 0000000000000000000000000000000000000000..1cfa46ce6ae8b0319e7094d23bf9d1ff0393f9b9 --- /dev/null +++ b/minigpt4/tasks/base_task.py @@ -0,0 +1,290 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import logging +import os + +import torch +import torch.distributed as dist +from minigpt4.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized +from minigpt4.common.logger import MetricLogger, SmoothedValue +from minigpt4.common.registry import registry +from minigpt4.datasets.data_utils import prepare_sample +import wandb + +class BaseTask: + def __init__(self, **kwargs): + super().__init__() + + self.inst_id_key = "instance_id" + self.cfg = "" + + @classmethod + def setup_task(cls, **kwargs): + return cls() + + def build_model(self, cfg): + self.cfg = cfg + model_config = cfg.model_cfg + + model_cls = registry.get_model_class(model_config.arch) + return model_cls.from_config(model_config) + + def build_datasets(self, cfg): + """ + Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. + Download dataset and annotations automatically if not exist. + + Args: + cfg (common.config.Config): _description_ + + Returns: + dict: Dictionary of torch.utils.data.Dataset objects by split. + """ + + datasets = dict() + + datasets_config = cfg.datasets_cfg + + assert len(datasets_config) > 0, "At least one dataset has to be specified." 
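+        # Build each dataset through its registered builder; an optional
+        # sample_ratio on a dataset config is propagated to its train split
+        # for ratio-based sampling.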
+ + for name in datasets_config: + dataset_config = datasets_config[name] + + builder = registry.get_builder_class(name)(dataset_config) + dataset = builder.build_datasets() + + dataset['train'].name = name + if 'sample_ratio' in dataset_config: + dataset['train'].sample_ratio = dataset_config.sample_ratio + + datasets[name] = dataset + + return datasets + + def train_step(self, model, samples): + loss = model(samples)["loss"] + return loss + + def valid_step(self, model, samples): + raise NotImplementedError + + def before_evaluation(self, model, dataset, **kwargs): + model.before_evaluation(dataset=dataset, task_type=type(self)) + + def after_evaluation(self, **kwargs): + pass + + def inference_step(self): + raise NotImplementedError + + def evaluation(self, model, data_loader, cuda_enabled=True): + metric_logger = MetricLogger(delimiter=" ") + header = "Evaluation" + # TODO make it configurable + print_freq = 10 + + results = [] + + for samples in metric_logger.log_every(data_loader, print_freq, header): + samples = prepare_sample(samples, cuda_enabled=cuda_enabled) + + eval_output = self.valid_step(model=model, samples=samples) + results.extend(eval_output) + + if is_dist_avail_and_initialized(): + dist.barrier() + + return results + + def train_epoch( + self, + epoch, + model, + data_loader, + optimizer, + lr_scheduler, + scaler=None, + cuda_enabled=False, + log_freq=50, + accum_grad_iters=1, + ): + return self._train_inner_loop( + epoch=epoch, + iters_per_epoch=lr_scheduler.iters_per_epoch, + model=model, + data_loader=data_loader, + optimizer=optimizer, + scaler=scaler, + lr_scheduler=lr_scheduler, + log_freq=log_freq, + cuda_enabled=cuda_enabled, + accum_grad_iters=accum_grad_iters, + ) + + def train_iters( + self, + epoch, + start_iters, + iters_per_inner_epoch, + model, + data_loader, + optimizer, + lr_scheduler, + scaler=None, + cuda_enabled=False, + log_freq=50, + accum_grad_iters=1, + ): + return self._train_inner_loop( + epoch=epoch, + start_iters=start_iters, + iters_per_epoch=iters_per_inner_epoch, + model=model, + data_loader=data_loader, + optimizer=optimizer, + scaler=scaler, + lr_scheduler=lr_scheduler, + log_freq=log_freq, + cuda_enabled=cuda_enabled, + accum_grad_iters=accum_grad_iters, + ) + + def _train_inner_loop( + self, + epoch, + iters_per_epoch, + model, + data_loader, + optimizer, + lr_scheduler, + scaler=None, + start_iters=None, + log_freq=50, + cuda_enabled=False, + accum_grad_iters=1, + ): + """ + An inner training loop compatible with both epoch-based and iter-based training. + + When using epoch-based, training stops after one epoch; when using iter-based, + training stops after #iters_per_epoch iterations. + """ + use_amp = scaler is not None + + if not hasattr(data_loader, "__next__"): + # convert to iterator if not already + data_loader = iter(data_loader) + + metric_logger = MetricLogger(delimiter=" ") + metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) + metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}")) + + # if iter-based runner, schedule lr based on inner epoch. + logging.info( + "Start training epoch {}, {} iters per inner epoch.".format( + epoch, iters_per_epoch + ) + ) + header = "Train: data epoch: [{}]".format(epoch) + if start_iters is None: + # epoch-based runner + inner_epoch = epoch + else: + # In iter-based runner, we schedule the learning rate based on iterations. 
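+            # start_iters counts iterations cumulatively, so floor-dividing by
+            # iters_per_epoch recovers the current inner-epoch index.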
+ inner_epoch = start_iters // iters_per_epoch + header = header + "; inner epoch [{}]".format(inner_epoch) + + for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header): + # if using iter-based runner, we stop after iters_per_epoch iterations. + if i >= iters_per_epoch: + break + + samples = next(data_loader) + + samples = prepare_sample(samples, cuda_enabled=cuda_enabled) + samples.update( + { + "epoch": inner_epoch, + "num_iters_per_epoch": iters_per_epoch, + "iters": i, + } + ) + + lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i) + + with torch.cuda.amp.autocast(enabled=use_amp): + loss = self.train_step(model=model, samples=samples) + + # after_train_step() + if use_amp: + scaler.scale(loss).backward() + else: + loss.backward() + + # update gradients every accum_grad_iters iterations + if (i + 1) % accum_grad_iters == 0: + if use_amp: + scaler.step(optimizer) + scaler.update() + else: + optimizer.step() + optimizer.zero_grad() + # if self.cfg.wandb_log: + if self.cfg.run_cfg.wandb_log: + wandb.log({"epoch": inner_epoch, "loss": loss}) + metric_logger.update(loss=loss.item()) + metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + + # after train_epoch() + # gather the stats from all processes + metric_logger.synchronize_between_processes() + logging.info("Averaged stats: " + str(metric_logger.global_avg())) + return { + k: "{:.3f}".format(meter.global_avg) + for k, meter in metric_logger.meters.items() + } + + @staticmethod + def save_result(result, result_dir, filename, remove_duplicate=""): + import json + + result_file = os.path.join( + result_dir, "%s_rank%d.json" % (filename, get_rank()) + ) + final_result_file = os.path.join(result_dir, "%s.json" % filename) + + json.dump(result, open(result_file, "w")) + + if is_dist_avail_and_initialized(): + dist.barrier() + + if is_main_process(): + logging.warning("rank %d starts merging results." % get_rank()) + # combine results from all processes + result = [] + + for rank in range(get_world_size()): + result_file = os.path.join( + result_dir, "%s_rank%d.json" % (filename, rank) + ) + res = json.load(open(result_file, "r")) + result += res + + if remove_duplicate: + result_new = [] + id_list = [] + for res in result: + if res[remove_duplicate] not in id_list: + id_list.append(res[remove_duplicate]) + result_new.append(res) + result = result_new + + json.dump(result, open(final_result_file, "w")) + print("result file saved to %s" % final_result_file) + + return final_result_file diff --git a/minigpt4/tasks/image_text_pretrain.py b/minigpt4/tasks/image_text_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..bbe8ec83a5dc95ee26a36e457feb394d18b7cd17 --- /dev/null +++ b/minigpt4/tasks/image_text_pretrain.py @@ -0,0 +1,18 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from minigpt4.common.registry import registry +from minigpt4.tasks.base_task import BaseTask + + +@registry.register_task("image_text_pretrain") +class ImageTextPretrainTask(BaseTask): + def __init__(self): + super().__init__() + + def evaluation(self, model, data_loader, cuda_enabled=True): + pass diff --git a/modified/__init__.py b/modified/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ae271f0602c6dec3478418dff31a9c99f389874 --- /dev/null +++ b/modified/__init__.py @@ -0,0 +1,10 @@ +__version__ = "4.37.0.dev0" +from .models.phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig +from .models.phi import ( + PHI_PRETRAINED_MODEL_ARCHIVE_LIST, + PhiForCausalLM, + PhiForSequenceClassification, + PhiForTokenClassification, + PhiModel, + PhiPreTrainedModel, + ) \ No newline at end of file diff --git a/modified/__pycache__/__init__.cpython-39.pyc b/modified/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6e931d61aef78eb1e9fdd6ad015cb11c445c4c2 Binary files /dev/null and b/modified/__pycache__/__init__.cpython-39.pyc differ diff --git a/modified/__pycache__/activations.cpython-39.pyc b/modified/__pycache__/activations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..950d697a91d1374d0affa8ba00d6477e210e2916 Binary files /dev/null and b/modified/__pycache__/activations.cpython-39.pyc differ diff --git a/modified/__pycache__/cache_utils.cpython-39.pyc b/modified/__pycache__/cache_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a40f71f298de5af64f4005f35dcf0fccbf5740b5 Binary files /dev/null and b/modified/__pycache__/cache_utils.cpython-39.pyc differ diff --git a/modified/__pycache__/configuration_utils.cpython-39.pyc b/modified/__pycache__/configuration_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..775078827e93115338b4f5878d1bce1958d8ea16 Binary files /dev/null and b/modified/__pycache__/configuration_utils.cpython-39.pyc differ diff --git a/modified/__pycache__/dependency_versions_check.cpython-39.pyc b/modified/__pycache__/dependency_versions_check.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf35491b308746d1cc7b3e4f22cadace129cebb Binary files /dev/null and b/modified/__pycache__/dependency_versions_check.cpython-39.pyc differ diff --git a/modified/__pycache__/dependency_versions_table.cpython-39.pyc b/modified/__pycache__/dependency_versions_table.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f24d85924dc263edbd67daffc7f822b322744d3 Binary files /dev/null and b/modified/__pycache__/dependency_versions_table.cpython-39.pyc differ diff --git a/modified/__pycache__/dynamic_module_utils.cpython-39.pyc b/modified/__pycache__/dynamic_module_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baf16e573c0fe2e00c905756acfe8a96483092ec Binary files /dev/null and b/modified/__pycache__/dynamic_module_utils.cpython-39.pyc differ diff --git a/modified/__pycache__/modeling_attn_mask_utils.cpython-39.pyc b/modified/__pycache__/modeling_attn_mask_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa4428e1c3506a442a0fa9487b849bf237ae35f2 Binary files /dev/null and 
b/modified/__pycache__/modeling_attn_mask_utils.cpython-39.pyc differ diff --git a/modified/__pycache__/modeling_outputs.cpython-39.pyc b/modified/__pycache__/modeling_outputs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d80510dd348538f69aeddd2c6836cea99741a31 Binary files /dev/null and b/modified/__pycache__/modeling_outputs.cpython-39.pyc differ diff --git a/modified/__pycache__/modeling_utils.cpython-39.pyc b/modified/__pycache__/modeling_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb77d2a4c51adc4ae77011a3e6eba34b24dc191c Binary files /dev/null and b/modified/__pycache__/modeling_utils.cpython-39.pyc differ diff --git a/modified/__pycache__/optimization.cpython-39.pyc b/modified/__pycache__/optimization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a7a3794b4de91237ae73bfa15ad1ac0459c967e Binary files /dev/null and b/modified/__pycache__/optimization.cpython-39.pyc differ diff --git a/modified/__pycache__/pytorch_utils.cpython-39.pyc b/modified/__pycache__/pytorch_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..218e08cf91661821ddea43324a9c736cf18090d8 Binary files /dev/null and b/modified/__pycache__/pytorch_utils.cpython-39.pyc differ diff --git a/modified/__pycache__/safetensors_conversion.cpython-39.pyc b/modified/__pycache__/safetensors_conversion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7a0afd2ad2bd16d058c657030b5de09543fe04e Binary files /dev/null and b/modified/__pycache__/safetensors_conversion.cpython-39.pyc differ diff --git a/modified/__pycache__/trainer_utils.cpython-39.pyc b/modified/__pycache__/trainer_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d463a95257bc0f1b95b84943ab757251eb0ef181 Binary files /dev/null and b/modified/__pycache__/trainer_utils.cpython-39.pyc differ diff --git a/modified/activations.py b/modified/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..2355fb5fed678d0de6e2c53f52644a35a691a34e --- /dev/null +++ b/modified/activations.py @@ -0,0 +1,239 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from collections import OrderedDict + +import torch +from packaging import version +from torch import Tensor, nn + +from .utils import logging + + +logger = logging.get_logger(__name__) + + +class PytorchGELUTanh(nn.Module): + """ + A fast C implementation of the tanh approximation of the GeLU activation function. See + https://arxiv.org/abs/1606.08415. + + This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical + match due to rounding errors. 
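+    The forward pass dispatches directly to nn.functional.gelu(input, approximate="tanh").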
+ """ + + def __init__(self): + super().__init__() + if version.parse(torch.__version__) < version.parse("1.12.0"): + raise ImportError( + f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use " + "PytorchGELUTanh. Please upgrade torch." + ) + + def forward(self, input: Tensor) -> Tensor: + return nn.functional.gelu(input, approximate="tanh") + + +class NewGELUActivation(nn.Module): + """ + Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see + the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 + """ + + def forward(self, input: Tensor) -> Tensor: + return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0)))) + + +class GELUActivation(nn.Module): + """ + Original Implementation of the GELU activation function in Google BERT repo when initially created. For + information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional + Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 + """ + + def __init__(self, use_gelu_python: bool = False): + super().__init__() + if use_gelu_python: + self.act = self._gelu_python + else: + self.act = nn.functional.gelu + + def _gelu_python(self, input: Tensor) -> Tensor: + return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0))) + + def forward(self, input: Tensor) -> Tensor: + return self.act(input) + + +class FastGELUActivation(nn.Module): + """ + Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs + """ + + def forward(self, input: Tensor) -> Tensor: + return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input))) + + +class QuickGELUActivation(nn.Module): + """ + Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs + """ + + def forward(self, input: Tensor) -> Tensor: + return input * torch.sigmoid(1.702 * input) + + +class ClippedGELUActivation(nn.Module): + """ + Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as + it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to + https://arxiv.org/abs/2004.09602. + + Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when + initially created. + + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415 + """ + + def __init__(self, min: float, max: float): + if min > max: + raise ValueError(f"min should be < max (got min: {min}, max: {max})") + + super().__init__() + self.min = min + self.max = max + + def forward(self, x: Tensor) -> Tensor: + return torch.clip(gelu(x), self.min, self.max) + + +class AccurateGELUActivation(nn.Module): + """ + Applies GELU approximation that is faster than default and more accurate than QuickGELU. 
See: + https://github.com/hendrycks/GELUs + + Implemented along with MEGA (Moving Average Equipped Gated Attention) + """ + + def __init__(self): + super().__init__() + self.precomputed_constant = math.sqrt(2 / math.pi) + + def forward(self, input: Tensor) -> Tensor: + return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3)))) + + +class MishActivation(nn.Module): + """ + See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also + visit the official repository for the paper: https://github.com/digantamisra98/Mish + """ + + def __init__(self): + super().__init__() + if version.parse(torch.__version__) < version.parse("1.9.0"): + self.act = self._mish_python + else: + self.act = nn.functional.mish + + def _mish_python(self, input: Tensor) -> Tensor: + return input * torch.tanh(nn.functional.softplus(input)) + + def forward(self, input: Tensor) -> Tensor: + return self.act(input) + + +class LinearActivation(nn.Module): + """ + Applies the linear activation function, i.e. forwarding input directly to output. + """ + + def forward(self, input: Tensor) -> Tensor: + return input + + +class LaplaceActivation(nn.Module): + """ + Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See + https://arxiv.org/abs/2209.10655 + + Inspired by squared relu, but with bounded range and gradient for better stability + """ + + def forward(self, input, mu=0.707107, sigma=0.282095): + input = (input - mu).div(sigma * math.sqrt(2.0)) + return 0.5 * (1.0 + torch.erf(input)) + + +class ReLUSquaredActivation(nn.Module): + """ + Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2 + """ + + def forward(self, input): + relu_applied = nn.functional.relu(input) + squared = torch.square(relu_applied) + return squared + + +class ClassInstantier(OrderedDict): + def __getitem__(self, key): + content = super().__getitem__(key) + cls, kwargs = content if isinstance(content, tuple) else (content, {}) + return cls(**kwargs) + + +ACT2CLS = { + "gelu": GELUActivation, + "gelu_10": (ClippedGELUActivation, {"min": -10, "max": 10}), + "gelu_fast": FastGELUActivation, + "gelu_new": NewGELUActivation, + "gelu_python": (GELUActivation, {"use_gelu_python": True}), + "gelu_pytorch_tanh": PytorchGELUTanh, + "gelu_accurate": AccurateGELUActivation, + "laplace": LaplaceActivation, + "leaky_relu": nn.LeakyReLU, + "linear": LinearActivation, + "mish": MishActivation, + "quick_gelu": QuickGELUActivation, + "relu": nn.ReLU, + "relu2": ReLUSquaredActivation, + "relu6": nn.ReLU6, + "sigmoid": nn.Sigmoid, + "silu": nn.SiLU, + "swish": nn.SiLU, + "tanh": nn.Tanh, +} +ACT2FN = ClassInstantier(ACT2CLS) + + +def get_activation(activation_string): + if activation_string in ACT2FN: + return ACT2FN[activation_string] + else: + raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}") + + +# For backwards compatibility with: from activations import gelu_python +gelu_python = get_activation("gelu_python") +gelu_new = get_activation("gelu_new") +gelu = get_activation("gelu") +gelu_fast = get_activation("gelu_fast") +quick_gelu = get_activation("quick_gelu") +silu = get_activation("silu") +mish = get_activation("mish") +linear_act = get_activation("linear") diff --git a/modified/cache_utils.py b/modified/cache_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b298a7bdd0f5d6a501fbe474eac03033f8a85c1b --- /dev/null 
+++ b/modified/cache_utils.py @@ -0,0 +1,322 @@ +from typing import Any, Dict, List, Optional, Tuple + +import torch + + +class Cache: + """ + Base, abstract class for all caches. The actual data structure is specific to each subclass. + """ + + def update( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. + + Parameters: + key_states (`torch.Tensor`): + The new key states to cache. + value_states (`torch.Tensor`): + The new value states to cache. + layer_idx (`int`): + The index of the layer to cache the states for. + cache_kwargs (`Dict[str, Any]`, `optional`): + Additional arguments for the cache subclass. These are specific to each subclass and allow new types of + cache to be created. + + Return: + A tuple containing the updated key and value states. + """ + raise NotImplementedError("Make sure to implement `update` in a subclass.") + + def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: + """Returns the sequence length of the cached states. A layer index can be optionally passed.""" + raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.") + + def get_max_length(self) -> Optional[int]: + """Returns the maximum sequence length of the cached states, if there is any.""" + raise NotImplementedError("Make sure to implement `get_max_length` in a subclass.") + + def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int: + """Given the sequence length of the new inputs, returns the usable length of the cache.""" + # Cache without size limit -> all cache is usable + # Cache with size limit -> if the length cache plus the length of the new inputs is larger the maximum cache + # length, we will need to evict part of the cache (and thus not all cache is usable) + max_length = self.get_max_length() + previous_seq_length = self.get_seq_length(layer_idx) + if max_length is not None and previous_seq_length + new_seq_length > max_length: + return max_length - new_seq_length + return previous_seq_length + + +class DynamicCache(Cache): + """ + A cache that grows dynamically as more tokens are generated. This is the default for generative models. + + It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is + `[batch_size, num_heads, seq_len, head_dim]`. + """ + + def __init__(self) -> None: + self.key_cache: List[torch.Tensor] = [] + self.value_cache: List[torch.Tensor] = [] + self.seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen + + def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: + """ + Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the + sequence length. + """ + if layer_idx < len(self): + return (self.key_cache[layer_idx], self.value_cache[layer_idx]) + else: + raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}") + + def __iter__(self): + """ + Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over + keys and values + """ + for layer_idx in range(len(self)): + yield (self.key_cache[layer_idx], self.value_cache[layer_idx]) + + def __len__(self): + """ + Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. 
This value corresponds
+        to the number of layers in the model.
+        """
+        return len(self.key_cache)
+
+    def update(
+        self,
+        key_states: torch.Tensor,
+        value_states: torch.Tensor,
+        layer_idx: int,
+        cache_kwargs: Optional[Dict[str, Any]] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
+
+        Parameters:
+            key_states (`torch.Tensor`):
+                The new key states to cache.
+            value_states (`torch.Tensor`):
+                The new value states to cache.
+            layer_idx (`int`):
+                The index of the layer to cache the states for.
+            cache_kwargs (`Dict[str, Any]`, `optional`):
+                Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
+
+        Return:
+            A tuple containing the updated key and value states.
+        """
+        # Update the number of seen tokens
+        if layer_idx == 0:
+            self.seen_tokens += key_states.shape[-2]
+
+        # Update the cache
+        if len(self.key_cache) <= layer_idx:
+            self.key_cache.append(key_states)
+            self.value_cache.append(value_states)
+        else:
+            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
+            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
+
+        return self.key_cache[layer_idx], self.value_cache[layer_idx]
+
+    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
+        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
+        if len(self.key_cache) <= layer_idx:
+            return 0
+        return self.key_cache[layer_idx].shape[-2]
+
+    def get_max_length(self) -> Optional[int]:
+        """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
+        return None
+
+    def reorder_cache(self, beam_idx: torch.LongTensor):
+        """Reorders the cache for beam search, given the selected beam indices."""
+        for layer_idx in range(len(self.key_cache)):
+            device = self.key_cache[layer_idx].device
+            self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
+            device = self.value_cache[layer_idx].device
+            self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
+
+    def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
+        """Converts the `DynamicCache` instance into its equivalent in the legacy cache format."""
+        legacy_cache = ()
+        for layer_idx in range(len(self)):
+            legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
+        return legacy_cache
+
+    @classmethod
+    def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
+        """Converts a cache in the legacy cache format into an equivalent `DynamicCache`."""
+        cache = cls()
+        if past_key_values is not None:
+            for layer_idx in range(len(past_key_values)):
+                key_states, value_states = past_key_values[layer_idx]
+                cache.update(key_states, value_states, layer_idx)
+        return cache
+
+
+class SinkCache(Cache):
+    """
+    A cache that, as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453), allows the model to
+    generate beyond the length of its context window, without losing fluency in the conversation. As it discards past
+    tokens, the model will lose the ability to generate tokens that depend on the context that was discarded.
+
+    It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
+    `[batch_size, num_heads, seq_len, head_dim]`.
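+
+    A minimal usage sketch (hypothetical; on RoPE models the attention layers
+    must pass `sin`/`cos` through `cache_kwargs` for key re-rotation to apply):
+
+        past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
+        outputs = model(input_ids, past_key_values=past_key_values, use_cache=True)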
+ + Parameters: + window_length (`int`): + The length of the context window. + num_sink_tokens (`int`): + The number of sink tokens. See the original paper for more information. + """ + + def __init__(self, window_length: int, num_sink_tokens: int) -> None: + self.key_cache: List[torch.Tensor] = [] + self.value_cache: List[torch.Tensor] = [] + self.window_length = window_length + self.num_sink_tokens = num_sink_tokens + self.cos_sin_cache = {} + self.seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen + + @staticmethod + def _rotate_half(x): + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + def _apply_key_rotary_pos_emb( + self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor + ) -> torch.Tensor: + rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin) + return rotated_key_states + + def _get_rerotation_cos_sin( + self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + if key_states.shape[-2] not in self.cos_sin_cache: + # Upcast to float32 temporarily for better accuracy + cos = cos.to(torch.float32) + sin = sin.to(torch.float32) + + # Compute the cos and sin required for back- and forward-rotating to one position earlier in the sequence + original_cos = cos[self.num_sink_tokens + key_states.shape[-2] :] + shifted_cos = cos[self.num_sink_tokens : -key_states.shape[-2]] + original_sin = sin[self.num_sink_tokens + key_states.shape[-2] :] + shifted_sin = sin[self.num_sink_tokens : -key_states.shape[-2]] + rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin + rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin + + self.cos_sin_cache[key_states.shape[-2]] = ( + rerotation_cos.to(key_states.dtype).unsqueeze(0), + rerotation_sin.to(key_states.dtype).unsqueeze(0), + ) + return self.cos_sin_cache[key_states.shape[-2]] + + def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: + """Returns the sequence length of the cached states. A layer index can be optionally passed.""" + # Workaround to make 'key_states.shape[-2] + past_key_value.get_seq_length(self.layer_idx)' <= window_length + if len(self.key_cache) <= layer_idx: + return 0 + return self.key_cache[layer_idx].shape[-2] + + def get_max_length(self) -> Optional[int]: + """Returns the maximum sequence length of the cached states.""" + return self.window_length + + def update( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. + + Parameters: + key_states (`torch.Tensor`): + The new key states to cache. + value_states (`torch.Tensor`): + The new value states to cache. + layer_idx (`int`): + The index of the layer to cache the states for. + cache_kwargs (`Dict[str, Any]`, `optional`): + Additional arguments for the cache subclass. The following arguments can be used in `SinkCache`: `sin`, + `cos` and `partial_rotation_size`. These arguments are used with models using RoPE, to recompute the + rotation as the tokens are shifted. + + Return: + A tuple containing the updated key and value states. + """ + # Optional kwargs for `SinkCache` -- needed on models using RoPE. `partial_rotation_size` is used on models + # with partially rotated position embeddings, like Phi or Persimmon. 
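+        # When sin/cos are absent we assume a non-RoPE model and skip the key
+        # re-rotation below.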
+        sin = cache_kwargs.get("sin")
+        cos = cache_kwargs.get("cos")
+        partial_rotation_size = cache_kwargs.get("partial_rotation_size")
+        using_rope = cos is not None and sin is not None
+
+        # Update the number of seen tokens
+        if layer_idx == 0:
+            self.seen_tokens += key_states.shape[-2]
+
+        # [bsz, num_heads, seq_len, head_dim]
+        if len(self.key_cache) <= layer_idx:
+            # Empty cache
+            self.key_cache.append(key_states)
+            self.value_cache.append(value_states)
+
+        elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length:
+            # Growing cache
+            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
+            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
+
+        else:
+            # Shifting cache
+            keys_to_keep = self.key_cache[layer_idx][
+                :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2] :
+            ]
+
+            # On RoPE models, we need to recompute the Key rotation as the tokens are shifted
+            if using_rope:
+                rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin(
+                    key_states, cos[: self.window_length], sin[: self.window_length]
+                )
+                if partial_rotation_size is not None:
+                    keys_to_keep, keys_pass = (
+                        keys_to_keep[..., :partial_rotation_size],
+                        keys_to_keep[..., partial_rotation_size:],
+                    )
+                keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin)
+                if partial_rotation_size is not None:
+                    keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1)
+
+            # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens
+            sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens]
+            self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2)
+
+            sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens]
+            values_to_keep = self.value_cache[layer_idx][
+                :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2] :
+            ]
+            self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2)
+
+        return self.key_cache[layer_idx], self.value_cache[layer_idx]
+
+    def reorder_cache(self, beam_idx: torch.LongTensor):
+        """Reorders the cache for beam search, given the selected beam indices."""
+        for layer_idx in range(len(self.key_cache)):
+            device = self.key_cache[layer_idx].device
+            self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
+            device = self.value_cache[layer_idx].device
+            self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
diff --git a/modified/configuration_utils.py b/modified/configuration_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c419ee0fc7cf31a27715f8eb3af0e60f5d6ed69
--- /dev/null
+++ b/modified/configuration_utils.py
@@ -0,0 +1,1107 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
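+# Editor's note (illustrative, not part of the original patch): configs built on the base class in this
+# module support a simple save/load round trip, shown here with a derived class, mirroring the docstring
+# examples further below:
+#     config = BertConfig(num_labels=3)
+#     config.save_pretrained("./my-config")             # writes ./my-config/config.json
+#     reloaded = BertConfig.from_pretrained("./my-config")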
+""" Configuration base class and utilities.""" + + +import copy +import json +import os +import re +import warnings +from typing import Any, Dict, List, Optional, Tuple, Union + +from packaging import version + +from . import __version__ +from .dynamic_module_utils import custom_object_save +from .utils import ( + CONFIG_NAME, + PushToHubMixin, + add_model_info_to_auto_map, + cached_file, + copy_func, + download_url, + extract_commit_hash, + is_remote_url, + is_torch_available, + logging, +) + + +logger = logging.get_logger(__name__) + +_re_configuration_file = re.compile(r"config\.(.*)\.json") + + +class PretrainedConfig(PushToHubMixin): + # no-format + r""" + Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as + methods for loading/downloading/saving configurations. + + + + A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to + initialize a model does **not** load the model weights. It only affects the model's configuration. + + + + Class attributes (overridden by derived classes): + + - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate + the correct object in [`~transformers.AutoConfig`]. + - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the + config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like: + [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`]. + - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary + outputs of the model during inference. + - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized + naming of attributes. + + Common attributes (present in all subclasses): + + - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the + embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT). + - **hidden_size** (`int`) -- The hidden size of the model. + - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the + model. + - **num_hidden_layers** (`int`) -- The number of blocks in the model. + + Arg: + name_or_path (`str`, *optional*, defaults to `""`): + Store the string that was passed to [`PreTrainedModel.from_pretrained`] or + [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created + with such a method. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not the model should return all hidden-states. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not the model should returns all attentions. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple. + is_encoder_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as an encoder/decoder or not. + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as decoder or not (in which case it's used as an encoder). 
+        cross_attention_hidden_size (`int`, *optional*):
+            The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
+            setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
+        add_cross_attention (`bool`, *optional*, defaults to `False`):
+            Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
+            that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
+            in `AUTO_MODELS_FOR_CAUSAL_LM`.
+        tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
+            Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
+            and decoder model to have the exact same parameter names.
+        prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
+            Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
+            heads to prune in said layer.
+
+            For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
+        chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
+            The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
+            the feed forward layer is not chunked. A chunk size of `n` means that the feed forward layer processes `n`
+            embeddings at a time, where `n` < sequence_length. For more information on feed forward chunking, see [How
+            does Feed Forward Chunking work?](../glossary.html#feed-forward-chunking).
+
+        > Parameters for sequence generation
+
+        max_length (`int`, *optional*, defaults to 20):
+            Maximum length that will be used by default in the `generate` method of the model.
+        min_length (`int`, *optional*, defaults to 0):
+            Minimum length that will be used by default in the `generate` method of the model.
+        do_sample (`bool`, *optional*, defaults to `False`):
+            Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling;
+            use greedy decoding otherwise.
+        early_stopping (`bool`, *optional*, defaults to `False`):
+            Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
+            when at least `num_beams` sentences are finished per batch or not.
+        num_beams (`int`, *optional*, defaults to 1):
+            Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
+            no beam search.
+        num_beam_groups (`int`, *optional*, defaults to 1):
+            Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
+            that will be used by default in the `generate` method of the model. 1 means no group beam search.
+        diversity_penalty (`float`, *optional*, defaults to 0.0):
+            Value to control diversity for group beam search that will be used by default in the `generate` method of
+            the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
+        temperature (`float`, *optional*, defaults to 1.0):
+            The value used to modulate the next token probabilities that will be used by default in the `generate`
+            method of the model. Must be strictly positive.
+        top_k (`int`, *optional*, defaults to 50):
+            Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
+            the `generate` method of the model.
+        top_p (`float`, *optional*, defaults to 1):
+            Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
+            only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
+        typical_p (`float`, *optional*, defaults to 1):
+            Local typicality measures how similar the conditional probability of predicting a target token next is to
+            the expected conditional probability of predicting a random token next, given the partial text already
+            generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
+            add up to `typical_p` or higher are kept for generation. See [this
+            paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
+        repetition_penalty (`float`, *optional*, defaults to 1):
+            Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
+            means no penalty.
+        length_penalty (`float`, *optional*, defaults to 1):
+            Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+            the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+            likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+            `length_penalty` < 0.0 encourages shorter sequences.
+        no_repeat_ngram_size (`int`, *optional*, defaults to 0):
+            Value that will be used by default in the `generate` method of the model for `no_repeat_ngram_size`. If
+            set to int > 0, all ngrams of that size can only occur once.
+        encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
+            Value that will be used by default in the `generate` method of the model for
+            `encoder_no_repeat_ngram_size`. If set to int > 0, all ngrams of that size that occur in the
+            `encoder_input_ids` cannot occur in the `decoder_input_ids`.
+        bad_words_ids (`List[List[int]]`, *optional*):
+            List of token ids that are not allowed to be generated that will be used by default in the `generate`
+            method of the model. In order to get the tokens of the words that should not appear in the generated text,
+            use `tokenizer.encode(bad_word, add_prefix_space=True)`.
+        num_return_sequences (`int`, *optional*, defaults to 1):
+            Number of independently computed returned sequences for each element in the batch that will be used by
+            default in the `generate` method of the model.
+        output_scores (`bool`, *optional*, defaults to `False`):
+            Whether the model should return the logits when used for generation.
+        return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+            Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
+        forced_bos_token_id (`int`, *optional*):
+            The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
+            multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
+            language token.
+        forced_eos_token_id (`int`, *optional*):
+            The id of the token to force as the last generated token when `max_length` is reached.
+        remove_invalid_values (`bool`, *optional*):
+            Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method from
+            crashing. Note that using `remove_invalid_values` can slow down generation.
+
+        > Parameters for fine-tuning tasks
+
+        architectures (`List[str]`, *optional*):
+            Model architectures that can be used with the model pretrained weights.
+        finetuning_task (`str`, *optional*):
+            Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
+            or PyTorch) checkpoint.
+        id2label (`Dict[int, str]`, *optional*):
+            A map from index (for instance prediction index, or target index) to label.
+        label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
+        num_labels (`int`, *optional*):
+            Number of labels to use in the last layer added to the model, typically for a classification task.
+        task_specific_params (`Dict[str, Any]`, *optional*):
+            Additional keyword arguments to store for the current task.
+        problem_type (`str`, *optional*):
+            Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
+            `"single_label_classification"` or `"multi_label_classification"`.
+
+        > Parameters linked to the tokenizer
+
+        tokenizer_class (`str`, *optional*):
+            The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated with
+            the model by default).
+        prefix (`str`, *optional*):
+            A specific prompt that should be added at the beginning of each text before calling the model.
+        bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
+        pad_token_id (`int`, *optional*): The id of the _padding_ token.
+        eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
+        decoder_start_token_id (`int`, *optional*):
+            If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
+        sep_token_id (`int`, *optional*): The id of the _separation_ token.
+
+        > PyTorch specific parameters
+
+        torchscript (`bool`, *optional*, defaults to `False`):
+            Whether or not the model should be used with Torchscript.
+        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
+            Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
+            model has an output word embedding layer.
+        torch_dtype (`str`, *optional*):
+            The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
+            (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
+            model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
+            `float16` weights. Since the config object is stored in plain text, this attribute contains just the
+            floating type string without the `torch.` prefix. For example, for `torch.float16` `torch_dtype` is the
+            `"float16"` string.
+
+            This attribute is currently not being used during model loading time, but this may change in future
+            versions. We can already start preparing for the future by saving the dtype with `save_pretrained`.
+        attn_implementation (`str`, *optional*):
+            The attention implementation to use in the model. Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (attention using [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (attention using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.
+
+        > TensorFlow specific parameters
+
+        use_bfloat16 (`bool`, *optional*, defaults to `False`):
+            Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
+        tf_legacy_loss (`bool`, *optional*, defaults to `False`):
+            Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may
+            not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers
+            v5.
+    """
+
+    model_type: str = ""
+    is_composition: bool = False
+    attribute_map: Dict[str, str] = {}
+    _auto_class: Optional[str] = None
+
+    # `attribute_map` lets subclasses alias model-specific attribute names to the standardized ones;
+    # `__setattr__`/`__getattribute__` below transparently redirect reads and writes through that map.
+    def __setattr__(self, key, value):
+        if key in super().__getattribute__("attribute_map"):
+            key = super().__getattribute__("attribute_map")[key]
+        super().__setattr__(key, value)
+
+    def __getattribute__(self, key):
+        if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
+            key = super().__getattribute__("attribute_map")[key]
+        return super().__getattribute__(key)
+
+    def __init__(self, **kwargs):
+        # Attributes with defaults
+        self.return_dict = kwargs.pop("return_dict", True)
+        self.output_hidden_states = kwargs.pop("output_hidden_states", False)
+        self.output_attentions = kwargs.pop("output_attentions", False)
+        self.torchscript = kwargs.pop("torchscript", False)  # Only used by PyTorch models
+        self.torch_dtype = kwargs.pop("torch_dtype", None)  # Only used by PyTorch models
+        self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
+        self.tf_legacy_loss = kwargs.pop("tf_legacy_loss", False)  # Only used by TensorFlow models
+        self.pruned_heads = kwargs.pop("pruned_heads", {})
+        self.tie_word_embeddings = kwargs.pop(
+            "tie_word_embeddings", True
+        )  # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
+
+        # `is_decoder` is used in encoder-decoder models to differentiate the decoder from the encoder
+        self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
+        self.is_decoder = kwargs.pop("is_decoder", False)
+        self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
+        self.add_cross_attention = kwargs.pop("add_cross_attention", False)
+        self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
+
+        # Parameters for sequence generation
+        self.max_length = kwargs.pop("max_length", 20)
+        self.min_length = kwargs.pop("min_length", 0)
+        self.do_sample = kwargs.pop("do_sample", False)
+        self.early_stopping = kwargs.pop("early_stopping", False)
+        self.num_beams = kwargs.pop("num_beams", 1)
+        self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
+        self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
+        self.temperature = kwargs.pop("temperature", 1.0)
+        self.top_k = kwargs.pop("top_k", 50)
+        self.top_p = kwargs.pop("top_p", 1.0)
+        self.typical_p = kwargs.pop("typical_p", 1.0)
+        self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
+        self.length_penalty = kwargs.pop("length_penalty", 1.0)
+        self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
+        self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
+        self.bad_words_ids = kwargs.pop("bad_words_ids", None)
+        self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
+        self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
+        self.output_scores = kwargs.pop("output_scores", False)
+        self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
+        self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
+        self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
+        self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
+        self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
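+        # Editor's note (illustrative, not part of the original patch): in `generate` this is
+        # conventionally a `(start_index, decay_factor)` tuple, e.g. `(15, 1.05)` begins applying an
+        # exponentially increasing length penalty after 15 generated tokens.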
+        self.suppress_tokens = kwargs.pop("suppress_tokens", None)
+        self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
+
+        # Fine-tuning task arguments
+        self.architectures = kwargs.pop("architectures", None)
+        self.finetuning_task = kwargs.pop("finetuning_task", None)
+        self.id2label = kwargs.pop("id2label", None)
+        self.label2id = kwargs.pop("label2id", None)
+        if self.label2id is not None and not isinstance(self.label2id, dict):
+            raise ValueError("Argument label2id should be a dictionary.")
+        if self.id2label is not None:
+            if not isinstance(self.id2label, dict):
+                raise ValueError("Argument id2label should be a dictionary.")
+            num_labels = kwargs.pop("num_labels", None)
+            if num_labels is not None and len(self.id2label) != num_labels:
+                logger.warning(
+                    f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
+                    f"{self.id2label}. The number of labels will be overwritten to {self.num_labels}."
+                )
+            # Keys are always strings in JSON so convert ids to int here.
+            self.id2label = {int(key): value for key, value in self.id2label.items()}
+        else:
+            self.num_labels = kwargs.pop("num_labels", 2)
+
+        if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
+            # we will start using self.torch_dtype in v5, but to be consistent with
+            # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
+            if is_torch_available():
+                import torch
+
+                self.torch_dtype = getattr(torch, self.torch_dtype)
+
+        # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
+        self.tokenizer_class = kwargs.pop("tokenizer_class", None)
+        self.prefix = kwargs.pop("prefix", None)
+        self.bos_token_id = kwargs.pop("bos_token_id", None)
+        self.pad_token_id = kwargs.pop("pad_token_id", None)
+        self.eos_token_id = kwargs.pop("eos_token_id", None)
+        self.sep_token_id = kwargs.pop("sep_token_id", None)
+
+        self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
+
+        # task specific arguments
+        self.task_specific_params = kwargs.pop("task_specific_params", None)
+
+        # regression / multi-label classification
+        self.problem_type = kwargs.pop("problem_type", None)
+        allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
+        if self.problem_type is not None and self.problem_type not in allowed_problem_types:
+            raise ValueError(
+                f"The config parameter `problem_type` was not understood: received {self.problem_type} "
+                "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
+            )
+
+        # TPU arguments
+        if kwargs.pop("xla_device", None) is not None:
+            logger.warning(
+                "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
+                "safely remove it from your `config.json` file."
+            )
+
+        # Name or path to the pretrained checkpoint
+        self._name_or_path = str(kwargs.pop("name_or_path", ""))
+        # Config hash
+        self._commit_hash = kwargs.pop("_commit_hash", None)
+
+        # Attention implementation to use, if relevant.
+        self._attn_implementation_internal = kwargs.pop("attn_implementation", None)
+
+        # Drop the transformers version info
+        self.transformers_version = kwargs.pop("transformers_version", None)
+
+        # Deal with gradient checkpointing
+        if kwargs.get("gradient_checkpointing", False):
+            warnings.warn(
+                "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
+                "of Transformers. Use `model.gradient_checkpointing_enable()` instead, or if you are using the "
+                "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
+            )
+
+        # Additional attributes without default values
+        for key, value in kwargs.items():
+            try:
+                setattr(self, key, value)
+            except AttributeError as err:
+                logger.error(f"Can't set {key} with value {value} for {self}")
+                raise err
+
+    @property
+    def name_or_path(self) -> str:
+        return getattr(self, "_name_or_path", None)
+
+    @name_or_path.setter
+    def name_or_path(self, value):
+        self._name_or_path = str(value)  # Make sure that name_or_path is a string (for JSON encoding)
+
+    @property
+    def use_return_dict(self) -> bool:
+        """
+        `bool`: Whether or not to return [`~utils.ModelOutput`] instead of tuples.
+        """
+        # If torchscript is set, force `return_dict=False` to avoid jit errors
+        return self.return_dict and not self.torchscript
+
+    @property
+    def num_labels(self) -> int:
+        """
+        `int`: The number of labels for classification models.
+        """
+        return len(self.id2label)
+
+    @num_labels.setter
+    def num_labels(self, num_labels: int):
+        if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
+            self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
+            self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
+
+    @property
+    def _attn_implementation(self):
+        # This property is made private for now (as it cannot be changed, and a `PreTrainedModel.use_attn_implementation` method needs to be implemented).
+        if hasattr(self, "_attn_implementation_internal"):
+            if self._attn_implementation_internal is None:
+                # `config.attn_implementation` should never be None, for backward compatibility.
+                return "eager"
+            else:
+                return self._attn_implementation_internal
+        else:
+            return "eager"
+
+    @_attn_implementation.setter
+    def _attn_implementation(self, value):
+        self._attn_implementation_internal = value
+
+    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
+        """
+        Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
+        [`~PretrainedConfig.from_pretrained`] class method.
+
+        Args:
+            save_directory (`str` or `os.PathLike`):
+                Directory where the configuration JSON file will be saved (will be created if it does not exist).
+            push_to_hub (`bool`, *optional*, defaults to `False`):
+                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+                namespace).
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+        """
+        self._set_token_in_kwargs(kwargs)
+
+        if os.path.isfile(save_directory):
+            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
+
+        os.makedirs(save_directory, exist_ok=True)
+
+        if push_to_hub:
+            commit_message = kwargs.pop("commit_message", None)
+            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+            repo_id = self._create_repo(repo_id, **kwargs)
+            files_timestamps = self._get_files_timestamps(save_directory)
+
+        # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
+        # loaded from the Hub.
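+        # Editor's sketch (illustrative, not part of the original patch): this branch is exercised by
+        # custom configs registered for auto classes, e.g.
+        #     class MyConfig(PretrainedConfig):         # hypothetical subclass
+        #         model_type = "my-model"
+        #     MyConfig.register_for_auto_class()        # sets `_auto_class = "AutoConfig"`
+        #     MyConfig().save_pretrained("./my-model")  # also copies the defining file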
+        if self._auto_class is not None:
+            custom_object_save(self, save_directory, config=self)
+
+        # If we save using the predefined names, we can load using `from_pretrained`
+        output_config_file = os.path.join(save_directory, CONFIG_NAME)
+
+        self.to_json_file(output_config_file, use_diff=True)
+        logger.info(f"Configuration saved in {output_config_file}")
+
+        if push_to_hub:
+            self._upload_modified_files(
+                save_directory,
+                repo_id,
+                files_timestamps,
+                commit_message=commit_message,
+                token=kwargs.get("token"),
+            )
+
+    @staticmethod
+    def _set_token_in_kwargs(kwargs, token=None):
+        """Temporary method to deal with `token` and `use_auth_token`.
+
+        This method avoids applying the same changes in all model config classes that overwrite `from_pretrained`.
+
+        Need to clean up `use_auth_token` in a follow-up PR.
+        """
+        # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.
+        if token is None:
+            token = kwargs.pop("token", None)
+        use_auth_token = kwargs.pop("use_auth_token", None)
+
+        if use_auth_token is not None:
+            warnings.warn(
+                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+                FutureWarning,
+            )
+            if token is not None:
+                raise ValueError(
+                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+                )
+            token = use_auth_token
+
+        if token is not None:
+            kwargs["token"] = token
+
+    @classmethod
+    def from_pretrained(
+        cls,
+        pretrained_model_name_or_path: Union[str, os.PathLike],
+        cache_dir: Optional[Union[str, os.PathLike]] = None,
+        force_download: bool = False,
+        local_files_only: bool = False,
+        token: Optional[Union[str, bool]] = None,
+        revision: str = "main",
+        **kwargs,
+    ) -> "PretrainedConfig":
+        r"""
+        Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
+
+        Args:
+            pretrained_model_name_or_path (`str` or `os.PathLike`):
+                This can be either:
+
+                - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+                  huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
+                  namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
+                - a path to a *directory* containing a configuration file saved using the
+                  [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
+                - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
+            cache_dir (`str` or `os.PathLike`, *optional*):
+                Path to a directory in which a downloaded pretrained model configuration should be cached if the
+                standard cache should not be used.
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force (re-)downloading the configuration files and override the cached versions if
+                they exist.
+            resume_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to delete incompletely received files. Attempts to resume the download if such a file
+                exists.
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+                identifier allowed by git.
+
+                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
+
+            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+                If `False`, then this function returns just the final configuration object.
+
+                If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
+                dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
+                part of `kwargs` which has not been used to update `config` and is otherwise ignored.
+            subfolder (`str`, *optional*, defaults to `""`):
+                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+                specify the folder name here.
+            kwargs (`Dict[str, Any]`, *optional*):
+                The values in kwargs of any keys which are configuration attributes will be used to override the loaded
+                values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
+                by the `return_unused_kwargs` keyword parameter.
+
+        Returns:
+            [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
+
+        Examples:
+
+        ```python
+        # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a
+        # derived class: BertConfig
+        config = BertConfig.from_pretrained(
+            "bert-base-uncased"
+        )  # Download configuration from huggingface.co and cache.
+        config = BertConfig.from_pretrained(
+            "./test/saved_model/"
+        )  # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
+        config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
+        config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False)
+        assert config.output_attentions == True
+        config, unused_kwargs = BertConfig.from_pretrained(
+            "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
+        )
+        assert config.output_attentions == True
+        assert unused_kwargs == {"foo": False}
+        ```"""
+        kwargs["cache_dir"] = cache_dir
+        kwargs["force_download"] = force_download
+        kwargs["local_files_only"] = local_files_only
+        kwargs["revision"] = revision
+
+        cls._set_token_in_kwargs(kwargs, token)
+
+        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+            logger.warning(
+                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+            )
+
+        return cls.from_dict(config_dict, **kwargs)
+
+    @classmethod
+    def get_config_dict(
+        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
+    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
+        """
+        From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
+        [`PretrainedConfig`] using `from_dict`.
+
+        Parameters:
+            pretrained_model_name_or_path (`str` or `os.PathLike`):
+                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
+
+        Returns:
+            `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
+
+        """
+        cls._set_token_in_kwargs(kwargs)
+
+        original_kwargs = copy.deepcopy(kwargs)
+        # Get config dict associated with the base config file
+        config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+        if "_commit_hash" in config_dict:
+            original_kwargs["_commit_hash"] = config_dict["_commit_hash"]
+
+        # That config file may point us toward another config file to use.
+        if "configuration_files" in config_dict:
+            configuration_file = get_configuration_file(config_dict["configuration_files"])
+            config_dict, kwargs = cls._get_config_dict(
+                pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
+            )
+
+        return config_dict, kwargs
+
+    @classmethod
+    def _get_config_dict(
+        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
+    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
+        cache_dir = kwargs.pop("cache_dir", None)
+        force_download = kwargs.pop("force_download", False)
+        resume_download = kwargs.pop("resume_download", False)
+        proxies = kwargs.pop("proxies", None)
+        token = kwargs.pop("token", None)
+        local_files_only = kwargs.pop("local_files_only", False)
+        revision = kwargs.pop("revision", None)
+        trust_remote_code = kwargs.pop("trust_remote_code", None)
+        subfolder = kwargs.pop("subfolder", "")
+        from_pipeline = kwargs.pop("_from_pipeline", None)
+        from_auto_class = kwargs.pop("_from_auto", False)
+        commit_hash = kwargs.pop("_commit_hash", None)
+
+        if trust_remote_code is True:
+            logger.warning(
+                "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
+                " ignored."
+            )
+
+        user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
+        if from_pipeline is not None:
+            user_agent["using_pipeline"] = from_pipeline
+
+        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
+
+        is_local = os.path.isdir(pretrained_model_name_or_path)
+        if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
+            # Special case when pretrained_model_name_or_path is a local file
+            resolved_config_file = pretrained_model_name_or_path
+            is_local = True
+        elif is_remote_url(pretrained_model_name_or_path):
+            configuration_file = pretrained_model_name_or_path
+            resolved_config_file = download_url(pretrained_model_name_or_path)
+        else:
+            configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
+
+            try:
+                # Load from local folder or from cache or download from model Hub and cache
+                resolved_config_file = cached_file(
+                    pretrained_model_name_or_path,
+                    configuration_file,
+                    cache_dir=cache_dir,
+                    force_download=force_download,
+                    proxies=proxies,
+                    resume_download=resume_download,
+                    local_files_only=local_files_only,
+                    token=token,
+                    user_agent=user_agent,
+                    revision=revision,
+                    subfolder=subfolder,
+                    _commit_hash=commit_hash,
+                )
+                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
+            except EnvironmentError:
+                # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
+                # the original exception.
+                raise
+            except Exception:
+                # For any other exception, we throw a generic error.
+                raise EnvironmentError(
+                    f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it"
+                    " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
+                    f" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
+                    f" containing a {configuration_file} file"
+                )
+
+        try:
+            # Load config dict
+            config_dict = cls._dict_from_json_file(resolved_config_file)
+            config_dict["_commit_hash"] = commit_hash
+        except (json.JSONDecodeError, UnicodeDecodeError):
+            raise EnvironmentError(
+                f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
+            )
+
+        if is_local:
+            logger.info(f"loading configuration file {resolved_config_file}")
+        else:
+            logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
+
+        if "auto_map" in config_dict and not is_local:
+            config_dict["auto_map"] = add_model_info_to_auto_map(
+                config_dict["auto_map"], pretrained_model_name_or_path
+            )
+        return config_dict, kwargs
+
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
+        """
+        Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
+
+        Args:
+            config_dict (`Dict[str, Any]`):
+                Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
+                retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
+            kwargs (`Dict[str, Any]`):
+                Additional parameters from which to initialize the configuration object.
+
+        Returns:
+            [`PretrainedConfig`]: The configuration object instantiated from those parameters.
+        """
+        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
+        # Those arguments may be passed along for our internal telemetry.
+        # We remove them so they don't appear in `return_unused_kwargs`.
+        kwargs.pop("_from_auto", None)
+        kwargs.pop("_from_pipeline", None)
+        # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
+        if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
+            kwargs["_commit_hash"] = config_dict["_commit_hash"]
+
+        # We remove it from kwargs so that it does not appear in `return_unused_kwargs`.
+        config_dict["attn_implementation"] = kwargs.pop("attn_implementation", None)
+
+        config = cls(**config_dict)
+
+        if hasattr(config, "pruned_heads"):
+            config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
+
+        # Update config with kwargs if needed
+        if "num_labels" in kwargs and "id2label" in kwargs:
+            num_labels = kwargs["num_labels"]
+            id2label = kwargs["id2label"] if kwargs["id2label"] is not None else []
+            if len(id2label) != num_labels:
+                raise ValueError(
+                    f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
+                    f"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove "
+                    "one of them."
+                )
+        to_remove = []
+        for key, value in kwargs.items():
+            if hasattr(config, key):
+                current_attr = getattr(config, key)
+                # To authorize passing a custom subconfig as kwarg in models that have nested configs.
+                if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
+                    value = current_attr.__class__(**value)
+                setattr(config, key, value)
+                if key != "torch_dtype":
+                    to_remove.append(key)
+        for key in to_remove:
+            kwargs.pop(key, None)
+
+        logger.info(f"Model config {config}")
+        if return_unused_kwargs:
+            return config, kwargs
+        else:
+            return config
+
+    @classmethod
+    def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
+        """
+        Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
+
+        Args:
+            json_file (`str` or `os.PathLike`):
+                Path to the JSON file containing the parameters.
+
+        Returns:
+            [`PretrainedConfig`]: The configuration object instantiated from that JSON file.
+
+        """
+        config_dict = cls._dict_from_json_file(json_file)
+        return cls(**config_dict)
+
+    @classmethod
+    def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
+        with open(json_file, "r", encoding="utf-8") as reader:
+            text = reader.read()
+        return json.loads(text)
+
+    def __eq__(self, other):
+        return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__} {self.to_json_string()}"
+
+    def to_diff_dict(self) -> Dict[str, Any]:
+        """
+        Removes all attributes from the config that correspond to the default config attributes, for better
+        readability, and serializes to a Python dictionary.
+
+        Returns:
+            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
+        """
+        config_dict = self.to_dict()
+
+        # get the default config dict
+        default_config_dict = PretrainedConfig().to_dict()
+
+        # get class specific config dict
+        class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
+
+        serializable_config_dict = {}
+
+        # only serialize values that differ from the default config
+        for key, value in config_dict.items():
+            if (
+                isinstance(getattr(self, key, None), PretrainedConfig)
+                and key in class_config_dict
+                and isinstance(class_config_dict[key], dict)
+            ):
+                # For nested configs we need to clean the diff recursively
+                diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))
+                if "model_type" in value:
+                    # Needs to be set even if it's not in the diff
+                    diff["model_type"] = value["model_type"]
+                if len(diff) > 0:
+                    serializable_config_dict[key] = diff
+            elif (
+                key not in default_config_dict
+                or key == "transformers_version"
+                or value != default_config_dict[key]
+                or (key in class_config_dict and value != class_config_dict[key])
+            ):
+                serializable_config_dict[key] = value
+
+        if hasattr(self, "quantization_config"):
+            serializable_config_dict["quantization_config"] = (
+                self.quantization_config.to_dict()
+                if not isinstance(self.quantization_config, dict)
+                else self.quantization_config
+            )
+
+            # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
+            _ = serializable_config_dict.pop("_pre_quantization_dtype", None)
+
+        self.dict_torch_dtype_to_str(serializable_config_dict)
+
+        if "_attn_implementation_internal" in serializable_config_dict:
+            del serializable_config_dict["_attn_implementation_internal"]
+
+        return serializable_config_dict
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Serializes this instance to a Python dictionary.
+
+        Returns:
+            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
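+
+        Example (an editor's illustrative sketch, not part of the original patch):
+
+        ```python
+        config = PretrainedConfig(output_attentions=True)
+        d = config.to_dict()
+        assert d["output_attentions"] is True
+        assert "transformers_version" in d  # stamped by `to_dict` during serialization
+        ```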
+ """ + output = copy.deepcopy(self.__dict__) + if hasattr(self.__class__, "model_type"): + output["model_type"] = self.__class__.model_type + if "_auto_class" in output: + del output["_auto_class"] + if "_commit_hash" in output: + del output["_commit_hash"] + if "_attn_implementation_internal" in output: + del output["_attn_implementation_internal"] + + # Transformers version when serializing the model + output["transformers_version"] = __version__ + + for key, value in output.items(): + # Deal with nested configs like CLIP + if isinstance(value, PretrainedConfig): + value = value.to_dict() + del value["transformers_version"] + + output[key] = value + + if hasattr(self, "quantization_config"): + output["quantization_config"] = ( + self.quantization_config.to_dict() + if not isinstance(self.quantization_config, dict) + else self.quantization_config + ) + + # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable. + _ = output.pop("_pre_quantization_dtype", None) + + self.dict_torch_dtype_to_str(output) + + return output + + def to_json_string(self, use_diff: bool = True) -> str: + """ + Serializes this instance to a JSON string. + + Args: + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` + is serialized to JSON string. + + Returns: + `str`: String containing all the attributes that make up this configuration instance in JSON format. + """ + if use_diff is True: + config_dict = self.to_diff_dict() + else: + config_dict = self.to_dict() + return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this configuration instance's parameters will be saved. + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` + is serialized to JSON file. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string(use_diff=use_diff)) + + def update(self, config_dict: Dict[str, Any]): + """ + Updates attributes of this class with attributes from `config_dict`. + + Args: + config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class. + """ + for key, value in config_dict.items(): + setattr(self, key, value) + + def update_from_string(self, update_str: str): + """ + Updates attributes of this class with attributes from `update_str`. + + The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example: + "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" + + The keys to change have to already exist in the config object. + + Args: + update_str (`str`): String with attributes that should be updated for this class. 
+
+        """
+
+        d = dict(x.split("=") for x in update_str.split(","))
+        for k, v in d.items():
+            if not hasattr(self, k):
+                raise ValueError(f"key {k} isn't in the original config dict")
+
+            old_v = getattr(self, k)
+            if isinstance(old_v, bool):
+                if v.lower() in ["true", "1", "y", "yes"]:
+                    v = True
+                elif v.lower() in ["false", "0", "n", "no"]:
+                    v = False
+                else:
+                    raise ValueError(f"can't derive true or false from {v} (key {k})")
+            elif isinstance(old_v, int):
+                v = int(v)
+            elif isinstance(old_v, float):
+                v = float(v)
+            elif not isinstance(old_v, str):
+                raise ValueError(
+                    f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
+                )
+
+            setattr(self, k, v)
+
+    def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
+        """
+        Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
+        converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
+        *"float32"* string, which can then be stored in JSON format.
+        """
+        if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
+            d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
+        for value in d.values():
+            if isinstance(value, dict):
+                self.dict_torch_dtype_to_str(value)
+
+    @classmethod
+    def register_for_auto_class(cls, auto_class="AutoConfig"):
+        """
+        Register this class with a given auto class. This should only be used for custom configurations as the ones in
+        the library are already mapped with `AutoConfig`.
+
+        This API is experimental and may have some slight breaking changes in the next releases.
+
+        Args:
+            auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
+                The auto class to register this new configuration with.
+        """
+        if not isinstance(auto_class, str):
+            auto_class = auto_class.__name__
+
+        import transformers.models.auto as auto_module
+
+        if not hasattr(auto_module, auto_class):
+            raise ValueError(f"{auto_class} is not a valid auto class.")
+
+        cls._auto_class = auto_class
+
+
+def get_configuration_file(configuration_files: List[str]) -> str:
+    """
+    Get the configuration file to use for this version of transformers.
+
+    Args:
+        configuration_files (`List[str]`): The list of available configuration files.
+
+    Returns:
+        `str`: The configuration file to use.
+    """
+    configuration_files_map = {}
+    for file_name in configuration_files:
+        search = _re_configuration_file.search(file_name)
+        if search is not None:
+            v = search.groups()[0]
+            configuration_files_map[v] = file_name
+    available_versions = sorted(configuration_files_map.keys())
+
+    # Defaults to FULL_CONFIGURATION_FILE and then try to look at some newer versions.
+    configuration_file = CONFIG_NAME
+    transformers_version = version.parse(__version__)
+    for v in available_versions:
+        if version.parse(v) <= transformers_version:
+            configuration_file = configuration_files_map[v]
+        else:
+            # No point going further since the versions are sorted.
+            break
+
+    return configuration_file
+
+
+def recursive_diff_dict(dict_a, dict_b, config_obj=None):
+    """
+    Helper function to recursively take the diff between two nested dictionaries. The resulting diff only contains the
+    values from `dict_a` that are different from values in `dict_b`.
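+
+    Example (an editor's illustrative sketch, not part of the original patch):
+
+    ```python
+    recursive_diff_dict({"a": 1, "b": 2}, {"a": 1, "b": 3})
+    # -> {"a": 1, "b": 2} here: with no `config_obj` the `default` dict is empty, so every key in
+    # `dict_a` counts as a difference; in practice `to_diff_dict` passes a `config_obj`, letting
+    # values that match the class defaults be dropped.
+    ```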
+    """
+    diff = {}
+    default = config_obj.__class__().to_dict() if config_obj is not None else {}
+    for key, value in dict_a.items():
+        obj_value = getattr(config_obj, str(key), None)
+        if isinstance(obj_value, PretrainedConfig) and key in dict_b and isinstance(dict_b[key], dict):
+            diff_value = recursive_diff_dict(value, dict_b[key], config_obj=obj_value)
+            if len(diff_value) > 0:
+                diff[key] = diff_value
+        elif key not in dict_b or value != dict_b[key] or key not in default or value != default[key]:
+            diff[key] = value
+    return diff
+
+
+PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
+if PretrainedConfig.push_to_hub.__doc__ is not None:
+    PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
+        object="config", object_class="AutoConfig", object_files="configuration file"
+    )
diff --git a/modified/dependency_versions_check.py b/modified/dependency_versions_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..82d07850847ec357f36ff51088ddec36aceff093
--- /dev/null
+++ b/modified/dependency_versions_check.py
@@ -0,0 +1,63 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .dependency_versions_table import deps
+from .utils.versions import require_version, require_version_core
+
+
+# define which module versions we always want to check at run time
+# (usually the ones defined in `install_requires` in setup.py)
+#
+# order specific notes:
+# - tqdm must be checked before tokenizers
+
+pkgs_to_check_at_runtime = [
+    "python",
+    "tqdm",
+    "regex",
+    "requests",
+    "packaging",
+    "filelock",
+    "numpy",
+    "tokenizers",
+    "huggingface-hub",
+    "safetensors",
+    "accelerate",
+    "pyyaml",
+]
+
+for pkg in pkgs_to_check_at_runtime:
+    if pkg in deps:
+        if pkg == "tokenizers":
+            # must be loaded here, or else tqdm check may fail
+            from .utils import is_tokenizers_available
+
+            if not is_tokenizers_available():
+                continue  # not required, check version only if installed
+        elif pkg == "accelerate":
+            # must be loaded here, or else tqdm check may fail
+            from .utils import is_accelerate_available
+
+            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
+            # Transformers with PyTorch
+            if not is_accelerate_available():
+                continue  # not required, check version only if installed
+
+        require_version_core(deps[pkg])
+    else:
+        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
+
+
+def dep_version_check(pkg, hint=None):
+    require_version(deps[pkg], hint)
diff --git a/modified/dependency_versions_table.py b/modified/dependency_versions_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcace1826ac453d949145b4c798f0084de4801e4
--- /dev/null
+++ b/modified/dependency_versions_table.py
@@ -0,0 +1,92 @@
+# THIS FILE HAS BEEN AUTOGENERATED. To update:
+# 1. modify the `_deps` dict in setup.py
+# 2. run `make deps_table_update`
+deps = {
+    "Pillow": "Pillow>=10.0.1,<=15.0",
+    "accelerate": "accelerate>=0.21.0",
+    "av": "av==9.2.0",
+    "beautifulsoup4": "beautifulsoup4",
+    "codecarbon": "codecarbon==1.2.0",
+    "cookiecutter": "cookiecutter==1.7.3",
+    "dataclasses": "dataclasses",
+    "datasets": "datasets!=2.5.0",
+    "decord": "decord==0.6.0",
+    "deepspeed": "deepspeed>=0.9.3",
+    "diffusers": "diffusers",
+    "dill": "dill<0.3.5",
+    "evaluate": "evaluate>=0.2.0",
+    "faiss-cpu": "faiss-cpu",
+    "fastapi": "fastapi",
+    "filelock": "filelock",
+    "flax": "flax>=0.4.1,<=0.7.0",
+    "fsspec": "fsspec<2023.10.0",
+    "ftfy": "ftfy",
+    "fugashi": "fugashi>=1.0",
+    "GitPython": "GitPython<3.1.19",
+    "hf-doc-builder": "hf-doc-builder>=0.3.0",
+    "huggingface-hub": "huggingface-hub>=0.19.3,<1.0",
+    "importlib_metadata": "importlib_metadata",
+    "ipadic": "ipadic>=1.0.0,<2.0",
+    "isort": "isort>=5.5.4",
+    "jax": "jax>=0.4.1,<=0.4.13",
+    "jaxlib": "jaxlib>=0.4.1,<=0.4.13",
+    "jieba": "jieba",
+    "kenlm": "kenlm",
+    "keras": "keras<2.16",
+    "keras-nlp": "keras-nlp>=0.3.1",
+    "librosa": "librosa",
+    "nltk": "nltk",
+    "natten": "natten>=0.14.6",
+    "numpy": "numpy>=1.17",
+    "onnxconverter-common": "onnxconverter-common",
+    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
+    "onnxruntime": "onnxruntime>=1.4.0",
+    "opencv-python": "opencv-python",
+    "optuna": "optuna",
+    "optax": "optax>=0.0.8,<=0.1.4",
+    "packaging": "packaging>=20.0",
+    "parameterized": "parameterized",
+    "phonemizer": "phonemizer",
+    "protobuf": "protobuf",
+    "psutil": "psutil",
+    "pyyaml": "pyyaml>=5.1",
+    "pydantic": "pydantic<2",
+    "pytest": "pytest>=7.2.0",
+    "pytest-timeout": "pytest-timeout",
+    "pytest-xdist": "pytest-xdist",
+    "python": "python>=3.8.0",
+    "ray[tune]": "ray[tune]>=2.7.0",
+    "regex": "regex!=2019.12.17",
+    "requests": "requests",
+    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
+    "rjieba": "rjieba",
+    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
+    "ruff": "ruff==0.1.5",
+    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
+    "sacremoses": "sacremoses",
+    "safetensors": "safetensors>=0.3.1",
+    "sagemaker": "sagemaker>=2.31.0",
+    "scikit-learn": "scikit-learn",
+    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
+    "sigopt": "sigopt",
+    "starlette": "starlette",
+    "sudachipy": "sudachipy>=0.6.6",
+    "sudachidict_core": "sudachidict_core>=20220729",
+    "tensorboard": "tensorboard",
+    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.16",
+    "tensorflow": "tensorflow>=2.6,<2.16",
+    "tensorflow-text": "tensorflow-text<2.16",
+    "tf2onnx": "tf2onnx",
+    "timeout-decorator": "timeout-decorator",
+    "timm": "timm",
+    "tokenizers": "tokenizers>=0.14,<0.19",
+    "torch": "torch>=1.10,!=1.12.0",
+    "torchaudio": "torchaudio",
+    "torchvision": "torchvision",
+    "pyctcdecode": "pyctcdecode>=0.4.0",
+    "tqdm": "tqdm>=4.27",
+    "unidic": "unidic>=1.0.2",
+    "unidic_lite": "unidic_lite>=1.0.7",
+    "urllib3": "urllib3<2.0.0",
+    "uvicorn": "uvicorn",
+}
diff --git a/modified/dynamic_module_utils.py b/modified/dynamic_module_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cdc0ad93d52684e75b7f45c54618dde3ca6ed78
--- /dev/null
+++ b/modified/dynamic_module_utils.py
@@ -0,0 +1,627 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities to dynamically load objects from the Hub.""" +import filecmp +import importlib +import os +import re +import shutil +import signal +import sys +import typing +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +from huggingface_hub import try_to_load_from_cache + +from .utils import ( + HF_MODULES_CACHE, + TRANSFORMERS_DYNAMIC_MODULE_NAME, + cached_file, + extract_commit_hash, + is_offline_mode, + logging, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def init_hf_modules(): + """ + Creates the cache directory for modules with an init, and adds it to the Python path. + """ + # This function has already been executed if HF_MODULES_CACHE already is in the Python path. + if HF_MODULES_CACHE in sys.path: + return + + sys.path.append(HF_MODULES_CACHE) + os.makedirs(HF_MODULES_CACHE, exist_ok=True) + init_path = Path(HF_MODULES_CACHE) / "__init__.py" + if not init_path.exists(): + init_path.touch() + importlib.invalidate_caches() + + +def create_dynamic_module(name: Union[str, os.PathLike]): + """ + Creates a dynamic module in the cache directory for modules. + + Args: + name (`str` or `os.PathLike`): + The name of the dynamic module to create. + """ + init_hf_modules() + dynamic_module_path = (Path(HF_MODULES_CACHE) / name).resolve() + # If the parent module does not exist yet, recursively create it. + if not dynamic_module_path.parent.exists(): + create_dynamic_module(dynamic_module_path.parent) + os.makedirs(dynamic_module_path, exist_ok=True) + init_path = dynamic_module_path / "__init__.py" + if not init_path.exists(): + init_path.touch() + # It is extremely important to invalidate the cache when we change stuff in those modules, or users end up + # with errors about module that do not exist. Same for all other `invalidate_caches` in this file. + importlib.invalidate_caches() + + +def get_relative_imports(module_file: Union[str, os.PathLike]) -> List[str]: + """ + Get the list of modules that are relatively imported in a module file. + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of relative imports in the module. + """ + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import .xxx` + relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from .xxx import yyy` + relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) + # Unique-ify + return list(set(relative_imports)) + + +def get_relative_import_files(module_file: Union[str, os.PathLike]) -> List[str]: + """ + Get the list of all files that are needed for a given module. Note that this function recurses through the relative + imports (if a imports b and b imports c, it will return module files for b and c). + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. 
+ + Returns: + `List[str]`: The list of all relative imports a given module needs (recursively), which will give us the list + of module files a given module needs. + """ + no_change = False + files_to_check = [module_file] + all_relative_imports = [] + + # Let's recurse through all relative imports + while not no_change: + new_imports = [] + for f in files_to_check: + new_imports.extend(get_relative_imports(f)) + + module_path = Path(module_file).parent + new_import_files = [str(module_path / m) for m in new_imports] + new_import_files = [f for f in new_import_files if f not in all_relative_imports] + files_to_check = [f"{f}.py" for f in new_import_files] + + no_change = len(new_import_files) == 0 + all_relative_imports.extend(files_to_check) + + return all_relative_imports + + +def get_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Extracts all the libraries (not relative imports this time) that are imported in a file. + + Args: + filename (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of all packages required to use the input module. + """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # filter out try/except block so in custom code we can have try/except imports + content = re.sub(r"\s*try\s*:\s*.*?\s*except\s*.*?:", "", content, flags=re.MULTILINE | re.DOTALL) + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + return list(set(imports)) + + +def check_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a + library is missing. + + Args: + filename (`str` or `os.PathLike`): The module file to check. + + Returns: + `List[str]`: The list of relative imports in the file. + """ + imports = get_imports(filename) + missing_packages = [] + for imp in imports: + try: + importlib.import_module(imp) + except ImportError: + missing_packages.append(imp) + + if len(missing_packages) > 0: + raise ImportError( + "This modeling file requires the following packages that were not found in your environment: " + f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" + ) + + return get_relative_imports(filename) + + +def get_class_in_module(class_name: str, module_path: Union[str, os.PathLike]) -> typing.Type: + """ + Import a module on the cache directory for modules and extract a class from it. + + Args: + class_name (`str`): The name of the class to import. + module_path (`str` or `os.PathLike`): The path to the module to import. + + Returns: + `typing.Type`: The class looked for. 
+    """
+    module_path = module_path.replace(os.path.sep, ".")
+    module = importlib.import_module(module_path)
+    return getattr(module, class_name)
+
+
+def get_cached_module_file(
+    pretrained_model_name_or_path: Union[str, os.PathLike],
+    module_file: str,
+    cache_dir: Optional[Union[str, os.PathLike]] = None,
+    force_download: bool = False,
+    resume_download: bool = False,
+    proxies: Optional[Dict[str, str]] = None,
+    token: Optional[Union[bool, str]] = None,
+    revision: Optional[str] = None,
+    local_files_only: bool = False,
+    repo_type: Optional[str] = None,
+    _commit_hash: Optional[str] = None,
+    **deprecated_kwargs,
+) -> str:
+    """
+    Prepares and downloads a module from a local folder or a distant repo and returns its path inside the cached
+    Transformers module.
+
+    Args:
+        pretrained_model_name_or_path (`str` or `os.PathLike`):
+            This can be either:
+
+            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+              huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
+              under a user or organization name, like `dbmdz/bert-base-german-cased`.
+            - a path to a *directory* containing a configuration file saved using the
+              [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
+
+        module_file (`str`):
+            The name of the module file containing the class to look for.
+        cache_dir (`str` or `os.PathLike`, *optional*):
+            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+            cache should not be used.
+        force_download (`bool`, *optional*, defaults to `False`):
+            Whether or not to force a (re-)download of the configuration files and override the cached versions if they
+            exist.
+        resume_download (`bool`, *optional*, defaults to `False`):
+            Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
+        proxies (`Dict[str, str]`, *optional*):
+            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+        token (`str` or `bool`, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+            when running `huggingface-cli login` (stored in `~/.huggingface`).
+        revision (`str`, *optional*, defaults to `"main"`):
+            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+            identifier allowed by git.
+        local_files_only (`bool`, *optional*, defaults to `False`):
+            If `True`, will only try to load the tokenizer configuration from local files.
+        repo_type (`str`, *optional*):
+            Specify the repo type (useful when downloading from a space for instance).
+
+    <Tip>
+
+    Passing `token=True` is required when you want to use a private model.
+
+    </Tip>
+
+    Returns:
+        `str`: The path to the module inside the cache.
+    """
+    use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
+    if use_auth_token is not None:
+        warnings.warn(
+            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+            FutureWarning,
+        )
+        if token is not None:
+            raise ValueError("`token` and `use_auth_token` are both specified.
Please set only the argument `token`.") + token = use_auth_token + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + submodule = os.path.basename(pretrained_model_name_or_path) + else: + submodule = pretrained_model_name_or_path.replace("/", os.path.sep) + cached_module = try_to_load_from_cache( + pretrained_model_name_or_path, module_file, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type + ) + + new_files = [] + try: + # Load from URL or cache if already cached + resolved_module_file = cached_file( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + revision=revision, + repo_type=repo_type, + _commit_hash=_commit_hash, + ) + if not is_local and cached_module != resolved_module_file: + new_files.append(module_file) + + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + + # Check we have all the requirements in our environment + modules_needed = check_imports(resolved_module_file) + + # Now we move the module inside our cached dynamic modules. + full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule + create_dynamic_module(full_submodule) + submodule_path = Path(HF_MODULES_CACHE) / full_submodule + if submodule == os.path.basename(pretrained_model_name_or_path): + # We copy local files to avoid putting too many folders in sys.path. This copy is done when the file is new or + # has changed since last copy. + if not (submodule_path / module_file).exists() or not filecmp.cmp( + resolved_module_file, str(submodule_path / module_file) + ): + shutil.copy(resolved_module_file, submodule_path / module_file) + importlib.invalidate_caches() + for module_needed in modules_needed: + module_needed = f"{module_needed}.py" + module_needed_file = os.path.join(pretrained_model_name_or_path, module_needed) + if not (submodule_path / module_needed).exists() or not filecmp.cmp( + module_needed_file, str(submodule_path / module_needed) + ): + shutil.copy(module_needed_file, submodule_path / module_needed) + importlib.invalidate_caches() + else: + # Get the commit hash + commit_hash = extract_commit_hash(resolved_module_file, _commit_hash) + + # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the + # benefit of versioning. 
+ submodule_path = submodule_path / commit_hash + full_submodule = full_submodule + os.path.sep + commit_hash + create_dynamic_module(full_submodule) + + if not (submodule_path / module_file).exists(): + shutil.copy(resolved_module_file, submodule_path / module_file) + importlib.invalidate_caches() + # Make sure we also have every file with relative + for module_needed in modules_needed: + if not (submodule_path / f"{module_needed}.py").exists(): + get_cached_module_file( + pretrained_model_name_or_path, + f"{module_needed}.py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + _commit_hash=commit_hash, + ) + new_files.append(f"{module_needed}.py") + + if len(new_files) > 0 and revision is None: + new_files = "\n".join([f"- {f}" for f in new_files]) + repo_type_str = "" if repo_type is None else f"{repo_type}s/" + url = f"https://huggingface.co/{repo_type_str}{pretrained_model_name_or_path}" + logger.warning( + f"A new version of the following files was downloaded from {url}:\n{new_files}" + "\n. Make sure to double-check they do not contain any added malicious code. To avoid downloading new " + "versions of the code file, you can pin a revision." + ) + + return os.path.join(full_submodule, module_file) + + +def get_class_from_dynamic_module( + class_reference: str, + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + repo_type: Optional[str] = None, + code_revision: Optional[str] = None, + **kwargs, +) -> typing.Type: + """ + Extracts a class from a module file, present in the local folder or repository of a model. + + + + Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should + therefore only be called on trusted repos. + + + + Args: + class_reference (`str`): + The full name of the class to load, including its module and optionally its repo. + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + This is used when `class_reference` does not specify another repo. + module_file (`str`): + The name of the module file containing the class to look for. + class_name (`str`): + The name of the class to import in the module. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. 
+ proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + repo_type (`str`, *optional*): + Specify the repo type (useful when downloading from a space for instance). + code_revision (`str`, *optional*, defaults to `"main"`): + The specific revision to use for the code on the Hub, if the code leaves in a different repository than the + rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for + storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `typing.Type`: The class, dynamically imported from the module. + + Examples: + + ```python + # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model") + + # Download module `modeling.py` from a given repo and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("sgugger/my-bert-model--modeling.MyBertModel", "sgugger/another-bert-model") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + # Catch the name of the repo if it's specified in `class_reference` + if "--" in class_reference: + repo_id, class_reference = class_reference.split("--") + else: + repo_id = pretrained_model_name_or_path + module_file, class_name = class_reference.split(".") + + if code_revision is None and pretrained_model_name_or_path == repo_id: + code_revision = revision + # And lastly we get the class inside our newly created module + final_module = get_cached_module_file( + repo_id, + module_file + ".py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=code_revision, + local_files_only=local_files_only, + repo_type=repo_type, + ) + return get_class_in_module(class_name, final_module.replace(".py", "")) + + +def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[Dict] = None) -> List[str]: + """ + Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally + adds the proper fields in a config. + + Args: + obj (`Any`): The object for which to save the module files. 
+ folder (`str` or `os.PathLike`): The folder where to save. + config (`PretrainedConfig` or dictionary, `optional`): + A config in which to register the auto_map corresponding to this custom object. + + Returns: + `List[str]`: The list of files saved. + """ + if obj.__module__ == "__main__": + logger.warning( + f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put " + "this code in a separate module so we can include it in the saved folder and make it easier to share via " + "the Hub." + ) + return + + def _set_auto_map_in_config(_config): + module_name = obj.__class__.__module__ + last_module = module_name.split(".")[-1] + full_name = f"{last_module}.{obj.__class__.__name__}" + # Special handling for tokenizers + if "Tokenizer" in full_name: + slow_tokenizer_class = None + fast_tokenizer_class = None + if obj.__class__.__name__.endswith("Fast"): + # Fast tokenizer: we have the fast tokenizer class and we may have the slow one has an attribute. + fast_tokenizer_class = f"{last_module}.{obj.__class__.__name__}" + if getattr(obj, "slow_tokenizer_class", None) is not None: + slow_tokenizer = getattr(obj, "slow_tokenizer_class") + slow_tok_module_name = slow_tokenizer.__module__ + last_slow_tok_module = slow_tok_module_name.split(".")[-1] + slow_tokenizer_class = f"{last_slow_tok_module}.{slow_tokenizer.__name__}" + else: + # Slow tokenizer: no way to have the fast class + slow_tokenizer_class = f"{last_module}.{obj.__class__.__name__}" + + full_name = (slow_tokenizer_class, fast_tokenizer_class) + + if isinstance(_config, dict): + auto_map = _config.get("auto_map", {}) + auto_map[obj._auto_class] = full_name + _config["auto_map"] = auto_map + elif getattr(_config, "auto_map", None) is not None: + _config.auto_map[obj._auto_class] = full_name + else: + _config.auto_map = {obj._auto_class: full_name} + + # Add object class to the config auto_map + if isinstance(config, (list, tuple)): + for cfg in config: + _set_auto_map_in_config(cfg) + elif config is not None: + _set_auto_map_in_config(config) + + result = [] + # Copy module file to the output folder. + object_file = sys.modules[obj.__module__].__file__ + dest_file = Path(folder) / (Path(object_file).name) + shutil.copy(object_file, dest_file) + result.append(dest_file) + + # Gather all relative imports recursively and make sure they are copied as well. + for needed_file in get_relative_import_files(object_file): + dest_file = Path(folder) / (Path(needed_file).name) + shutil.copy(needed_file, dest_file) + result.append(dest_file) + + return result + + +def _raise_timeout_error(signum, frame): + raise ValueError( + "Loading this model requires you to execute custom code contained in the model repository on your local " + "machine. Please set the option `trust_remote_code=True` to permit loading of this model." + ) + + +TIME_OUT_REMOTE_CODE = 15 + + +def resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code): + if trust_remote_code is None: + if has_local_code: + trust_remote_code = False + elif has_remote_code and TIME_OUT_REMOTE_CODE > 0: + try: + signal.signal(signal.SIGALRM, _raise_timeout_error) + signal.alarm(TIME_OUT_REMOTE_CODE) + while trust_remote_code is None: + answer = input( + f"The repository for {model_name} contains custom code which must be executed to correctly " + f"load the model. 
You can inspect the repository content at https://hf.co/{model_name}.\n"
+                        f"You can avoid this prompt in the future by passing the argument `trust_remote_code=True`.\n\n"
+                        f"Do you wish to run the custom code? [y/N] "
+                    )
+                    if answer.lower() in ["yes", "y", "1"]:
+                        trust_remote_code = True
+                    elif answer.lower() in ["no", "n", "0", ""]:
+                        trust_remote_code = False
+                signal.alarm(0)
+            except Exception:
+                # OS which does not support signal.SIGALRM
+                raise ValueError(
+                    f"The repository for {model_name} contains custom code which must be executed to correctly "
+                    f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n"
+                    f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
+                )
+        elif has_remote_code:
+            # For the CI, which puts the timeout at 0
+            _raise_timeout_error(None, None)
+
+    if has_remote_code and not has_local_code and not trust_remote_code:
+        raise ValueError(
+            f"Loading {model_name} requires you to execute the configuration file in that"
+            " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+            " set the option `trust_remote_code=True` to remove this error."
+        )
+
+    return trust_remote_code
diff --git a/modified/generation/__init__.py b/modified/generation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a46cb4fa910ada3921ea38a8c722817eee46749b
--- /dev/null
+++ b/modified/generation/__init__.py
@@ -0,0 +1,296 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
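Editor's note: this `__init__.py` follows the repository's lazy-module convention, registering every public name in `_import_structure` and deferring the actual submodule imports until first attribute access via `_LazyModule` (see the bottom of the file). Below is a rough, illustrative sketch of that pattern, not the actual `_LazyModule` implementation, which has extra machinery for `TYPE_CHECKING`, `__dir__`, and pickling:

# Illustrative sketch only; names are simplified relative to transformers._LazyModule.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {"submodule": ["Name", ...]} into {"Name": "submodule"}.
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        # Called only for names not yet materialized: import on first access.
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(
            "." + self._attr_to_submodule[attr], package=self.__name__
        )
        return getattr(submodule, attr)

The payoff is that importing the `generation` package stays cheap even when heavy optional backends (PyTorch, TensorFlow, Flax) are installed, since each backend-specific submodule is loaded only when one of its names is first used.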
+ +from typing import TYPE_CHECKING + +from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available + + +_import_structure = { + "configuration_utils": ["GenerationConfig"], + "streamers": ["TextIteratorStreamer", "TextStreamer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["beam_constraints"] = [ + "Constraint", + "ConstraintListState", + "DisjunctiveConstraint", + "PhrasalConstraint", + ] + _import_structure["beam_search"] = [ + "BeamHypotheses", + "BeamScorer", + "BeamSearchScorer", + "ConstrainedBeamSearchScorer", + ] + _import_structure["logits_process"] = [ + "AlternatingCodebooksLogitsProcessor", + "ClassifierFreeGuidanceLogitsProcessor", + "EncoderNoRepeatNGramLogitsProcessor", + "EncoderRepetitionPenaltyLogitsProcessor", + "EpsilonLogitsWarper", + "EtaLogitsWarper", + "ExponentialDecayLengthPenalty", + "ForcedBOSTokenLogitsProcessor", + "ForcedEOSTokenLogitsProcessor", + "ForceTokensLogitsProcessor", + "HammingDiversityLogitsProcessor", + "InfNanRemoveLogitsProcessor", + "LogitNormalization", + "LogitsProcessor", + "LogitsProcessorList", + "LogitsWarper", + "MinLengthLogitsProcessor", + "MinNewTokensLengthLogitsProcessor", + "NoBadWordsLogitsProcessor", + "NoRepeatNGramLogitsProcessor", + "PrefixConstrainedLogitsProcessor", + "RepetitionPenaltyLogitsProcessor", + "SequenceBiasLogitsProcessor", + "SuppressTokensLogitsProcessor", + "SuppressTokensAtBeginLogitsProcessor", + "TemperatureLogitsWarper", + "TopKLogitsWarper", + "TopPLogitsWarper", + "TypicalLogitsWarper", + "UnbatchedClassifierFreeGuidanceLogitsProcessor", + "WhisperTimeStampLogitsProcessor", + ] + _import_structure["stopping_criteria"] = [ + "MaxNewTokensCriteria", + "MaxLengthCriteria", + "MaxTimeCriteria", + "StoppingCriteria", + "StoppingCriteriaList", + "validate_stopping_criteria", + ] + _import_structure["utils"] = [ + "GenerationMixin", + "top_k_top_p_filtering", + "GreedySearchEncoderDecoderOutput", + "GreedySearchDecoderOnlyOutput", + "SampleEncoderDecoderOutput", + "SampleDecoderOnlyOutput", + "BeamSearchEncoderDecoderOutput", + "BeamSearchDecoderOnlyOutput", + "BeamSampleEncoderDecoderOutput", + "BeamSampleDecoderOnlyOutput", + "ContrastiveSearchEncoderDecoderOutput", + "ContrastiveSearchDecoderOnlyOutput", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tf_logits_process"] = [ + "TFForcedBOSTokenLogitsProcessor", + "TFForcedEOSTokenLogitsProcessor", + "TFForceTokensLogitsProcessor", + "TFLogitsProcessor", + "TFLogitsProcessorList", + "TFLogitsWarper", + "TFMinLengthLogitsProcessor", + "TFNoBadWordsLogitsProcessor", + "TFNoRepeatNGramLogitsProcessor", + "TFRepetitionPenaltyLogitsProcessor", + "TFSuppressTokensAtBeginLogitsProcessor", + "TFSuppressTokensLogitsProcessor", + "TFTemperatureLogitsWarper", + "TFTopKLogitsWarper", + "TFTopPLogitsWarper", + ] + _import_structure["tf_utils"] = [ + "TFGenerationMixin", + "tf_top_k_top_p_filtering", + "TFGreedySearchDecoderOnlyOutput", + "TFGreedySearchEncoderDecoderOutput", + "TFSampleEncoderDecoderOutput", + "TFSampleDecoderOnlyOutput", + "TFBeamSearchEncoderDecoderOutput", + "TFBeamSearchDecoderOnlyOutput", + "TFBeamSampleEncoderDecoderOutput", + "TFBeamSampleDecoderOnlyOutput", + "TFContrastiveSearchEncoderDecoderOutput", + "TFContrastiveSearchDecoderOnlyOutput", + ] + +try: + if not 
is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["flax_logits_process"] = [ + "FlaxForcedBOSTokenLogitsProcessor", + "FlaxForcedEOSTokenLogitsProcessor", + "FlaxForceTokensLogitsProcessor", + "FlaxLogitsProcessor", + "FlaxLogitsProcessorList", + "FlaxLogitsWarper", + "FlaxMinLengthLogitsProcessor", + "FlaxSuppressTokensAtBeginLogitsProcessor", + "FlaxSuppressTokensLogitsProcessor", + "FlaxTemperatureLogitsWarper", + "FlaxTopKLogitsWarper", + "FlaxTopPLogitsWarper", + "FlaxWhisperTimeStampLogitsProcessor", + ] + _import_structure["flax_utils"] = [ + "FlaxGenerationMixin", + "FlaxGreedySearchOutput", + "FlaxSampleOutput", + "FlaxBeamSearchOutput", + ] + +if TYPE_CHECKING: + from .configuration_utils import GenerationConfig + from .streamers import TextIteratorStreamer, TextStreamer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint + from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer + from .logits_process import ( + AlternatingCodebooksLogitsProcessor, + ClassifierFreeGuidanceLogitsProcessor, + EncoderNoRepeatNGramLogitsProcessor, + EncoderRepetitionPenaltyLogitsProcessor, + EpsilonLogitsWarper, + EtaLogitsWarper, + ExponentialDecayLengthPenalty, + ForcedBOSTokenLogitsProcessor, + ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, + HammingDiversityLogitsProcessor, + InfNanRemoveLogitsProcessor, + LogitNormalization, + LogitsProcessor, + LogitsProcessorList, + LogitsWarper, + MinLengthLogitsProcessor, + MinNewTokensLengthLogitsProcessor, + NoBadWordsLogitsProcessor, + NoRepeatNGramLogitsProcessor, + PrefixConstrainedLogitsProcessor, + RepetitionPenaltyLogitsProcessor, + SequenceBiasLogitsProcessor, + SuppressTokensAtBeginLogitsProcessor, + SuppressTokensLogitsProcessor, + TemperatureLogitsWarper, + TopKLogitsWarper, + TopPLogitsWarper, + TypicalLogitsWarper, + UnbatchedClassifierFreeGuidanceLogitsProcessor, + WhisperTimeStampLogitsProcessor, + ) + from .stopping_criteria import ( + MaxLengthCriteria, + MaxNewTokensCriteria, + MaxTimeCriteria, + StoppingCriteria, + StoppingCriteriaList, + validate_stopping_criteria, + ) + from .utils import ( + BeamSampleDecoderOnlyOutput, + BeamSampleEncoderDecoderOutput, + BeamSearchDecoderOnlyOutput, + BeamSearchEncoderDecoderOutput, + ContrastiveSearchDecoderOnlyOutput, + ContrastiveSearchEncoderDecoderOutput, + GenerationMixin, + GreedySearchDecoderOnlyOutput, + GreedySearchEncoderDecoderOutput, + SampleDecoderOnlyOutput, + SampleEncoderDecoderOutput, + top_k_top_p_filtering, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tf_logits_process import ( + TFForcedBOSTokenLogitsProcessor, + TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, + TFLogitsProcessor, + TFLogitsProcessorList, + TFLogitsWarper, + TFMinLengthLogitsProcessor, + TFNoBadWordsLogitsProcessor, + TFNoRepeatNGramLogitsProcessor, + TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, + TFTemperatureLogitsWarper, + TFTopKLogitsWarper, + TFTopPLogitsWarper, + ) + from .tf_utils import ( + TFBeamSampleDecoderOnlyOutput, + TFBeamSampleEncoderDecoderOutput, + TFBeamSearchDecoderOnlyOutput, + 
TFBeamSearchEncoderDecoderOutput, + TFContrastiveSearchDecoderOnlyOutput, + TFContrastiveSearchEncoderDecoderOutput, + TFGenerationMixin, + TFGreedySearchDecoderOnlyOutput, + TFGreedySearchEncoderDecoderOutput, + TFSampleDecoderOnlyOutput, + TFSampleEncoderDecoderOutput, + tf_top_k_top_p_filtering, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .flax_logits_process import ( + FlaxForcedBOSTokenLogitsProcessor, + FlaxForcedEOSTokenLogitsProcessor, + FlaxForceTokensLogitsProcessor, + FlaxLogitsProcessor, + FlaxLogitsProcessorList, + FlaxLogitsWarper, + FlaxMinLengthLogitsProcessor, + FlaxSuppressTokensAtBeginLogitsProcessor, + FlaxSuppressTokensLogitsProcessor, + FlaxTemperatureLogitsWarper, + FlaxTopKLogitsWarper, + FlaxTopPLogitsWarper, + FlaxWhisperTimeStampLogitsProcessor, + ) + from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/modified/generation/__pycache__/__init__.cpython-39.pyc b/modified/generation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..464428cd221a4e1b69c0aef589ea960144e1a921 Binary files /dev/null and b/modified/generation/__pycache__/__init__.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/beam_constraints.cpython-39.pyc b/modified/generation/__pycache__/beam_constraints.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b94d5efe9cd87ec8a5bf3e5ac9a87fbc02cd5098 Binary files /dev/null and b/modified/generation/__pycache__/beam_constraints.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/beam_search.cpython-39.pyc b/modified/generation/__pycache__/beam_search.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a80597a1b1790f98e786604941b04ff1a73e20ea Binary files /dev/null and b/modified/generation/__pycache__/beam_search.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/configuration_utils.cpython-39.pyc b/modified/generation/__pycache__/configuration_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ac7b68a95fb0814d8e7bea95e6262da09dae6cd Binary files /dev/null and b/modified/generation/__pycache__/configuration_utils.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/flax_logits_process.cpython-39.pyc b/modified/generation/__pycache__/flax_logits_process.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf509602d196cda47b54250dab26a3272d1069ab Binary files /dev/null and b/modified/generation/__pycache__/flax_logits_process.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/flax_utils.cpython-39.pyc b/modified/generation/__pycache__/flax_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05d893275c1e013485790d3316da2e658b464237 Binary files /dev/null and b/modified/generation/__pycache__/flax_utils.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/logits_process.cpython-39.pyc b/modified/generation/__pycache__/logits_process.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c10cf996b7d9736dde3fdf6c0b5a40de1bb03d01 Binary files /dev/null and b/modified/generation/__pycache__/logits_process.cpython-39.pyc 
differ diff --git a/modified/generation/__pycache__/stopping_criteria.cpython-39.pyc b/modified/generation/__pycache__/stopping_criteria.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52fc8239dc663e0223bf99665e1881b2e475881d Binary files /dev/null and b/modified/generation/__pycache__/stopping_criteria.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/streamers.cpython-39.pyc b/modified/generation/__pycache__/streamers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04aaffbeeab85d6eb511a3fd0a575390fd5b90de Binary files /dev/null and b/modified/generation/__pycache__/streamers.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/tf_logits_process.cpython-39.pyc b/modified/generation/__pycache__/tf_logits_process.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..690c8d78a92dc8eb5346e35723a9b09e5a6fda92 Binary files /dev/null and b/modified/generation/__pycache__/tf_logits_process.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/tf_utils.cpython-39.pyc b/modified/generation/__pycache__/tf_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29c259efb41b1dc7b17659ddbbeb55d95fa9c51d Binary files /dev/null and b/modified/generation/__pycache__/tf_utils.cpython-39.pyc differ diff --git a/modified/generation/__pycache__/utils.cpython-39.pyc b/modified/generation/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd8b7fdba9f3f69600f5651107d7fd40d5857263 Binary files /dev/null and b/modified/generation/__pycache__/utils.cpython-39.pyc differ diff --git a/modified/generation/beam_constraints.py b/modified/generation/beam_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..b53c4512427a8793449da9f68c39a12527721d40 --- /dev/null +++ b/modified/generation/beam_constraints.py @@ -0,0 +1,521 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + + +class Constraint(ABC): + r"""Abstract base class for all constraints that can be applied during generation. + It must define how the constraint can be satisfied. + + All classes that inherit Constraint must follow the requirement that + + ```py + completed = False + while not completed: + _, completed = constraint.update(constraint.advance()) + ``` + + will always terminate (halt). + """ + + def __init__(self): + # test for the above condition + self.test() + + def test(self): + """ + Tests whether this constraint has been properly defined. + """ + counter = 0 + completed = False + while not completed: + if counter == 1: + self.reset() + advance = self.advance() + if not self.does_advance(advance): + raise Exception( + "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." + ) + + stepped, completed, reset = self.update(advance) + counter += 1 + + if counter > 10000: + raise Exception("update() does not fulfill the constraint.") + + if self.remaining() != 0: + raise Exception("Custom Constraint is not defined correctly.") + + @abstractmethod + def advance(self): + """ + When called, returns the token that would take this constraint one step closer to being fulfilled. + + Return: + token_ids(`torch.tensor`): Must be a tensor of a list of indexable tokens, not some integer. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." 
+        )
+
+    @abstractmethod
+    def does_advance(self, token_id: int):
+        """
+        Reads in a token and returns whether it creates progress.
+        """
+        raise NotImplementedError(
+            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+        )
+
+    @abstractmethod
+    def update(self, token_id: int):
+        """
+        Reads in a token and returns booleans that indicate the progress made by it. This function will update the
+        state of this object, unlike `does_advance(self, token_id: int)`.
+
+        This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
+        been generated. This becomes important if token_id != desired token (refer to the else statement in
+        PhrasalConstraint).
+
+        Args:
+            token_id(`int`):
+                The id of a newly generated token in the beam search.
+        Return:
+            stepped(`bool`):
+                Whether this constraint has become one step closer to being fulfilled.
+            completed(`bool`):
+                Whether this constraint has been completely fulfilled by this token being generated.
+            reset (`bool`):
+                Whether this constraint has reset its progress by this token being generated.
+        """
+        raise NotImplementedError(
+            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+        )
+
+    @abstractmethod
+    def reset(self):
+        """
+        Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
+        a constraint is interrupted by an unwanted token.
+        """
+        raise NotImplementedError(
+            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+        )
+
+    @abstractmethod
+    def remaining(self):
+        """
+        Returns the number of remaining steps of `advance()` in order to complete this constraint.
+        """
+        raise NotImplementedError(
+            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+        )
+
+    @abstractmethod
+    def copy(self, stateful=False):
+        """
+        Creates a new instance of this constraint.
+
+        Args:
+            stateful(`bool`): Whether to copy not only the constraint, but also its current state.
+
+        Return:
+            constraint(`Constraint`): The same constraint as the one being called from.
+        """
+        raise NotImplementedError(
+            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+        )
+
+
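Editor's note: before the concrete subclasses, an orientation example may help. The sketch below is illustrative only and not part of the patch; it drives the abstract contract above with the `PhrasalConstraint` defined just below, using invented token ids.

# Illustrative only: token ids are invented; PhrasalConstraint is defined below.
constraint = PhrasalConstraint(token_ids=[5, 9, 2])

completed = False
while not completed:
    token_id = constraint.advance()                           # next token that makes progress
    stepped, completed, reset = constraint.update(token_id)   # feed it back as "generated"

assert constraint.remaining() == 0  # the guaranteed-termination condition from the docstring

+class PhrasalConstraint(Constraint):
+    r"""
+    [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.
+
+    Args:
+        token_ids (`List[int]`):
+            The ids of the tokens that must be generated by the output.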
+    """
+
+    def __init__(self, token_ids: List[int]):
+        # bypass Constraint.__init__, which would run self.test() before token_ids is set
+        super(Constraint, self).__init__()
+
+        if not isinstance(token_ids, list) or len(token_ids) == 0:
+            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
+        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
+            raise ValueError(f"`token_ids` has to be a list of non-negative integers, but is {token_ids}.")
+
+        self.token_ids = token_ids
+
+        self.seqlen = len(self.token_ids)
+        self.fulfilled_idx = -1  # the index of the currently fulfilled step
+        self.completed = False
+
+    def advance(self):
+        if self.completed:
+            return None
+        return self.token_ids[self.fulfilled_idx + 1]
+
+    def does_advance(self, token_id: int):
+        if not isinstance(token_id, int):
+            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+
+        if self.completed:
+            return False
+
+        return token_id == self.token_ids[self.fulfilled_idx + 1]
+
+    def update(self, token_id: int):
+        if not isinstance(token_id, int):
+            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+
+        stepped = False
+        completed = False
+        reset = False
+
+        if self.does_advance(token_id):
+            self.fulfilled_idx += 1
+            stepped = True
+            if self.fulfilled_idx == (self.seqlen - 1):
+                completed = True
+            self.completed = completed
+        else:
+            # failed to make progress.
+            reset = True
+            self.reset()
+        return stepped, completed, reset
+
+    def reset(self):
+        self.completed = False
+        self.fulfilled_idx = -1  # restore the initial state from __init__, so advance() offers the first token again
+
+    def remaining(self):
+        return self.seqlen - (self.fulfilled_idx + 1)
+
+    def copy(self, stateful=False):
+        new_constraint = PhrasalConstraint(self.token_ids)
+
+        if stateful:
+            new_constraint.seqlen = self.seqlen
+            new_constraint.fulfilled_idx = self.fulfilled_idx
+            new_constraint.completed = self.completed
+
+        return new_constraint
+
+
+class DisjunctiveTrie:
+    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
+        r"""
+        A helper class that builds a trie with the words represented in `nested_token_ids`.
+        """
+        self.max_height = max([len(one) for one in nested_token_ids])
+
+        root = {}
+        for token_ids in nested_token_ids:
+            level = root
+            for token_id in token_ids:
+                if token_id not in level:
+                    level[token_id] = {}
+
+                level = level[token_id]
+
+        if no_subsets and self.has_subsets(root, nested_token_ids):
+            raise ValueError(
+                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
+                f" {nested_token_ids}."
+            )
+
+        self.trie = root
+
+    def next_tokens(self, current_seq):
+        """
+        The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
+        """
+        start = self.trie
+
+        for current_token in current_seq:
+            start = start[current_token]
+
+        next_tokens = list(start.keys())
+
+        return next_tokens
+
+    def reached_leaf(self, current_seq):
+        next_tokens = self.next_tokens(current_seq)
+
+        return len(next_tokens) == 0
+
+    def count_leaves(self, root):
+        next_nodes = list(root.values())
+        if len(next_nodes) == 0:
+            return 1
+        else:
+            return sum([self.count_leaves(nn) for nn in next_nodes])
+
+    def has_subsets(self, trie, nested_token_ids):
+        """
+        Returns whether # of leaves == # of words. Otherwise some word is a subset of another.
+        """
+        leaf_count = self.count_leaves(trie)
+        return len(nested_token_ids) != leaf_count
+
+
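Editor's note: the trie is the machinery behind the disjunctive constraint that follows, and it is easiest to see with a tiny example. The sketch below is illustrative only and not part of the patch; the token ids are invented.

# Two "words" share the prefix 1 -> 2; the trie branches after that.
trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4, 5]], no_subsets=True)

print(trie.next_tokens([]))          # [1]    - only one way to start
print(trie.next_tokens([1, 2]))      # [3, 4] - the branch point
print(trie.reached_leaf([1, 2, 3]))  # True   - one full word has been consumed
print(trie.max_height)               # 4      - length of the longest word

+class DisjunctiveConstraint(Constraint):
+    r"""
+    A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.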
+ + Args: + nested_token_ids (`List[List[int]]`): + A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from + the list of words. + """ + + def __init__(self, nested_token_ids: List[List[int]]): + super(Constraint, self).__init__() + + if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0: + raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") + if any(not isinstance(token_ids, list) for token_ids in nested_token_ids): + raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") + if any( + any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) + for token_ids in nested_token_ids + ): + raise ValueError( + f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." + ) + + self.trie = DisjunctiveTrie(nested_token_ids) + self.token_ids = nested_token_ids + + self.seqlen = self.trie.max_height + self.current_seq = [] + self.completed = False + + def advance(self): + token_list = self.trie.next_tokens(self.current_seq) + + if len(token_list) == 0: + return None + else: + return token_list + + def does_advance(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") + + next_tokens = self.trie.next_tokens(self.current_seq) + + return token_id in next_tokens + + def update(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") + + stepped = False + completed = False + reset = False + + if self.does_advance(token_id): + self.current_seq.append(token_id) + stepped = True + else: + reset = True + self.reset() + + completed = self.trie.reached_leaf(self.current_seq) + self.completed = completed + + return stepped, completed, reset + + def reset(self): + self.completed = False + self.current_seq = [] + + def remaining(self): + if self.completed: + # since this can be completed without reaching max height + return 0 + else: + return self.seqlen - len(self.current_seq) + + def copy(self, stateful=False): + new_constraint = DisjunctiveConstraint(self.token_ids) + + if stateful: + new_constraint.seq_len = self.seqlen + new_constraint.current_seq = self.current_seq + new_constraint.completed = self.completed + + return new_constraint + + +class ConstraintListState: + r""" + A class for beam scorers to track its progress through a list of constraints. + + Args: + constraints (`List[Constraint]`): + A list of [`Constraint`] objects that must be fulfilled by the beam scorer. 
+ """ + + def __init__(self, constraints: List[Constraint]): + self.constraints = constraints + + # max # of steps required to fulfill a given constraint + self.max_seqlen = max([c.seqlen for c in constraints]) + self.n_constraints = len(constraints) + self.completed = False + + self.init_state() + + def init_state(self): + self.complete_constraints = [] + self.inprogress_constraint = None + self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints] + + def get_bank(self): + add = 0 + if self.inprogress_constraint: + # extra points for having a constraint mid-fulfilled + add += self.max_seqlen - self.inprogress_constraint.remaining() + + return (len(self.complete_constraints) * self.max_seqlen) + add + + def advance(self): + """The list of tokens to generate such that we can make progress. + By "list" we don't mean the list of token that will fully fulfill a constraint. + + Given constraints `c_i = {t_ij | j == # of tokens}`, If we're not in the middle of progressing through a + specific constraint `c_i`, we return: + + `[t_k1 for k in indices of unfulfilled constraints]` + + If we are in the middle of a constraint, then we return: + `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint. + + Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, + that's the only one we'll return. + """ + token_list = [] + if self.inprogress_constraint is None: + for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" + advance = constraint.advance() + if isinstance(advance, int): + token_list.append(advance) + elif isinstance(advance, list): + token_list.extend(advance) + else: + advance = self.inprogress_constraint.advance() + if isinstance(advance, int): + token_list.append(advance) + elif isinstance(advance, list): + token_list.extend(advance) + + if len(token_list) == 0: + return None + else: + return token_list + + def reset(self, token_ids: Optional[List[int]]): + """ + token_ids: the tokens generated thus far to reset the state of the progress through constraints. + """ + self.init_state() + + if token_ids is not None: + for token in token_ids: + # completes or steps **one** constraint + complete, stepped = self.add(token) + + # the entire list of constraints are fulfilled + if self.completed: + break + + def add(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.") + + complete, stepped = False, False + + if self.completed: + complete = True + stepped = False + return complete, stepped + + if self.inprogress_constraint is not None: + # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current + # job, simply update the state + + stepped, complete, reset = self.inprogress_constraint.update(token_id) + if reset: + # 1. If the next token breaks the progress, then we must restart. + # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". + + # But that doesn't mean we self.init_state(), since we only reset the state for this particular + # constraint, not the full list of constraints. + + self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False)) + self.inprogress_constraint = None + + if complete: + # 2. If the next token completes the constraint, move it to completed list, set + # inprogress to None. 
If there are no pending constraints either, then this full list of constraints
+                # is complete.
+
+                self.complete_constraints.append(self.inprogress_constraint)
+                self.inprogress_constraint = None
+
+                if len(self.pending_constraints) == 0:
+                    # we're done!
+                    self.completed = True
+
+        else:
+            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
+            # list of constraints?
+
+            for cidx, pending_constraint in enumerate(self.pending_constraints):
+                if pending_constraint.does_advance(token_id):
+                    stepped, complete, reset = pending_constraint.update(token_id)
+
+                    if not stepped:
+                        raise Exception(
+                            "`constraint.update(token_id)` is not yielding incremental progress, "
+                            "even though `constraint.does_advance(token_id)` is true."
+                        )
+
+                    if complete:
+                        self.complete_constraints.append(pending_constraint)
+                        self.inprogress_constraint = None
+
+                    if not complete and stepped:
+                        self.inprogress_constraint = pending_constraint
+
+                    if complete or stepped:
+                        # If we made any progress at all, then it's at least not a "pending constraint".
+
+                        self.pending_constraints = (
+                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
+                        )
+
+                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
+                            # If there's no longer any pending after this and no inprogress either, then we must be
+                            # complete.
+
+                            self.completed = True
+
+                        break  # prevent accidentally stepping through multiple constraints with just one token.
+
+        return complete, stepped
+
+    def copy(self, stateful=True):
+        new_state = ConstraintListState(self.constraints)  # we never mutate the `Constraint` objects in
+        # `self.constraints`, so the copies start from their initialization state.
+
+        if stateful:
+            new_state.complete_constraints = [
+                constraint.copy(stateful=True) for constraint in self.complete_constraints
+            ]
+            if self.inprogress_constraint is not None:
+                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
+            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
+
+        return new_state
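Editor's note: to make the bookkeeping above concrete, here is an illustrative sketch (invented token ids, not part of the patch) of a `ConstraintListState` tracking two phrasal constraints:

# Illustrative only: drive a ConstraintListState with hand-picked token ids.
state = ConstraintListState([PhrasalConstraint([5, 9]), PhrasalConstraint([7])])

print(state.advance())   # [5, 7] - first tokens of both unfulfilled constraints
state.add(5)             # steps into the first constraint
print(state.advance())   # [9]    - mid-constraint, so only its next token is offered
state.add(9)             # completes the first constraint
state.add(7)             # completes the second; state.completed is now True
print(state.get_bank())  # 4      - maximal bank: 2 complete constraints * max_seqlen 2

diff --git a/modified/generation/beam_search.py b/modified/generation/beam_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e73862e163ddfd53122420b88614d2818681e78
--- /dev/null
+++ b/modified/generation/beam_search.py
@@ -0,0 +1,1005 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from collections import UserDict
+from typing import Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from ..utils import add_start_docstrings
+from .beam_constraints import Constraint, ConstraintListState
+
+
+PROCESS_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`].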
See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): + Current scores of the top `2 * num_beams` non-finished beam hypotheses. + next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. + next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + Beam indices indicating to which beam hypothesis the `next_tokens` correspond. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + beam_indices (`torch.LongTensor`, *optional*): + Beam indices indicating to which beam hypothesis each token correspond. + group_index (`int`, *optional*): + The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`]. + + Return: + `UserDict`: A dictionary composed of the fields as defined above: + + - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all + non-finished beams. + - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added + to the non-finished beam_hypotheses. + - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices + indicating to which beam the next tokens shall be added. + +""" + +FINALIZE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`): + The final scores of all non-finished beams. + final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`): + The last tokens to be added to the non-finished beam_hypotheses. + final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`): + The beam indices indicating to which beam the `final_beam_tokens` shall be added. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + + Return: + `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. + The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early + due to the `eos_token_id`. + +""" + + +class BeamScorer(ABC): + """ + Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and + [`~PreTrainedModel.beam_sample`]. 
+ """ + + @abstractmethod + @add_start_docstrings(PROCESS_INPUTS_DOCSTRING) + def process( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + **kwargs, + ) -> Tuple[torch.Tensor]: + raise NotImplementedError("This is an abstract method.") + + @abstractmethod + @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING) + def finalize( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + max_length: int, + **kwargs, + ) -> torch.LongTensor: + raise NotImplementedError("This is an abstract method.") + + +class BeamSearchScorer(BeamScorer): + r""" + [`BeamScorer`] implementing standard beam search decoding. + + Adapted in part from [Facebook's XLM beam search + code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529). + + Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS + implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua) + + Args: + batch_size (`int`): + Batch Size of `input_ids` for which standard beam search decoding is run in parallel. + num_beams (`int`): + Number of beams for beam search. + device (`torch.device`): + Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be + allocated. + length_penalty (`float`, *optional*, defaults to 1.0): + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. + do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): + Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: + `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an + heuristic is applied and the generation stops when is it very unlikely to find better candidates; + `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical + beam search algorithm). + num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): + The number of beam hypotheses that shall be returned upon calling + [`~transformers.BeamSearchScorer.finalize`]. + num_beam_groups (`int`, *optional*, defaults to 1): + Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. + See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. + max_length (`int`, *optional*): + The maximum length of the sequence to be generated. 
+ """ + + def __init__( + self, + batch_size: int, + num_beams: int, + device: torch.device, + length_penalty: Optional[float] = 1.0, + do_early_stopping: Optional[Union[bool, str]] = False, + num_beam_hyps_to_keep: Optional[int] = 1, + num_beam_groups: Optional[int] = 1, + max_length: Optional[int] = None, + ): + self.num_beams = num_beams + self.device = device + self.length_penalty = length_penalty + self.do_early_stopping = do_early_stopping + self.num_beam_hyps_to_keep = num_beam_hyps_to_keep + self.num_beam_groups = num_beam_groups + self.group_size = self.num_beams // self.num_beam_groups + + self._is_init = False + # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch. + # If group_beam_search is not used, the list consists of `batch_size` beam_hyps. + self._beam_hyps = [ + BeamHypotheses( + num_beams=self.group_size, + length_penalty=self.length_penalty, + early_stopping=self.do_early_stopping, + max_length=max_length, + ) + for _ in range(batch_size * self.num_beam_groups) + ] + # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group + # in the i-th mini-batch is complete. + self._done = torch.tensor( + [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device + ) + + if not isinstance(num_beams, int) or num_beams <= 1: + raise ValueError( + f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," + " one should make use of `greedy_search` instead." + ) + + if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): + raise ValueError( + "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" + f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." + ) + + @property + def is_done(self) -> bool: + return self._done.all() + + def process( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + group_index: Optional[int] = 0, + decoder_prompt_len: Optional[int] = 0, + ) -> Dict[str, torch.Tensor]: + # add up to the length which the next_scores is calculated on (including decoder prompt) + cur_len = input_ids.shape[-1] + 1 + batch_size = len(self._beam_hyps) // self.num_beam_groups + + if not (batch_size == (input_ids.shape[0] // self.group_size)): + if self.num_beam_groups > 1: + raise ValueError( + f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " + f"size of {self.group_size} is expected by the beam scorer." + ) + else: + raise ValueError( + f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " + f"{self.group_size} is expected by the beam scorer." 
+ ) + + device = input_ids.device + next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) + next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) + next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + for batch_idx in range(batch_size): + batch_group_idx = batch_idx * self.num_beam_groups + group_index + if self._done[batch_group_idx]: + if self.num_beams < len(self._beam_hyps[batch_group_idx]): + raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") + if eos_token_id is None or pad_token_id is None: + raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") + # pad the batch + next_beam_scores[batch_idx, :] = 0 + next_beam_tokens[batch_idx, :] = pad_token_id + next_beam_indices[batch_idx, :] = 0 + continue + + # next tokens for this sentence + beam_idx = 0 + for beam_token_rank, (next_token, next_score, next_index) in enumerate( + zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) + ): + batch_beam_idx = batch_idx * self.group_size + next_index + # add to generated hypotheses if end of sentence + if (eos_token_id is not None) and (next_token.item() in eos_token_id): + # if beam_token does not belong to top num_beams tokens, it should not be added + is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size + if is_beam_token_worse_than_top_num_beams: + continue + if beam_indices is not None: + beam_index = beam_indices[batch_beam_idx] + beam_index = beam_index + (batch_beam_idx,) + else: + beam_index = None + + self._beam_hyps[batch_group_idx].add( + input_ids[batch_beam_idx].clone(), + next_score.item(), + beam_indices=beam_index, + generated_len=cur_len - decoder_prompt_len, + ) + else: + # add next predicted token since it is not eos_token + next_beam_scores[batch_idx, beam_idx] = next_score + next_beam_tokens[batch_idx, beam_idx] = next_token + next_beam_indices[batch_idx, beam_idx] = batch_beam_idx + beam_idx += 1 + + # once the beam for next step is full, don't add more tokens to it. + if beam_idx == self.group_size: + break + + if beam_idx < self.group_size: + raise ValueError( + f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" + f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." 
+ ) + + # Check if we are done so that we can save a pad step if all(done) + self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done( + next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len + ) + + return UserDict( + { + "next_beam_scores": next_beam_scores.view(-1), + "next_beam_tokens": next_beam_tokens.view(-1), + "next_beam_indices": next_beam_indices.view(-1), + } + ) + + def finalize( + self, + input_ids: torch.LongTensor, + final_beam_scores: torch.FloatTensor, + final_beam_tokens: torch.LongTensor, + final_beam_indices: torch.LongTensor, + max_length: int, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, + ) -> Tuple[torch.LongTensor]: + batch_size = len(self._beam_hyps) // self.num_beam_groups + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + # finalize all open beam hypotheses and add to generated hypotheses + for batch_group_idx, beam_hyp in enumerate(self._beam_hyps): + if self._done[batch_group_idx]: + continue + + # all open beam hypotheses are added to the beam hypothesis + # beam hypothesis class automatically keeps the best beams + for index_per_group in range(self.group_size): + batch_beam_idx = batch_group_idx * self.group_size + index_per_group + final_score = final_beam_scores[batch_beam_idx].item() + final_tokens = input_ids[batch_beam_idx] + beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None + generated_len = final_tokens.shape[-1] - decoder_prompt_len + beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len) + + # select the best hypotheses + sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) + best = [] + best_indices = [] + best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) + + # retrieve best hypotheses + for i in range(batch_size): + beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups] + candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams] + sorted_hyps = sorted(candidate_beams, key=lambda x: x[0]) + for j in range(self.num_beam_hyps_to_keep): + best_hyp_tuple = sorted_hyps.pop() + best_score = best_hyp_tuple[0] + best_hyp = best_hyp_tuple[1] + best_index = best_hyp_tuple[2] + sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) + + # append hyp to lists + best.append(best_hyp) + + # append indices to list + best_indices.append(best_index) + + best_scores[i * self.num_beam_hyps_to_keep + j] = best_score + + # prepare for adding eos + sent_lengths_max = sent_lengths.max().item() + 1 + sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max + decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + + if len(best_indices) > 0 and best_indices[0] is not None: + indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + else: + indices = None + + # shorter batches are padded if needed + if sent_lengths.min().item() != sent_lengths.max().item(): + if pad_token_id is None: + raise ValueError("`pad_token_id` has to be defined") + decoded.fill_(pad_token_id) + + if indices is not None: + indices.fill_(-1) + + # fill with hypotheses and eos_token_id if the latter fits in + for i, (hypo, best_idx) in 
enumerate(zip(best, best_indices)):
+ decoded[i, : sent_lengths[i]] = hypo
+
+ if indices is not None:
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
+
+ if sent_lengths[i] < sent_max_len:
+ # inserting only the first eos_token_id
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
+
+ return UserDict(
+ {
+ "sequences": decoded,
+ "sequence_scores": best_scores,
+ "beam_indices": indices,
+ }
+ )
+
+
+class ConstrainedBeamSearchScorer(BeamScorer):
+ r"""
+ [`BeamScorer`] implementing constrained beam search decoding.
+
+
+ Args:
+ batch_size (`int`):
+ Batch size of `input_ids` for which standard beam search decoding is run in parallel.
+ num_beams (`int`):
+ Number of beams for beam search.
+ constraints (`List[Constraint]`):
+ A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation
+ output. For more information, the documentation of [`Constraint`] should be read.
+ device (`torch.device`):
+ Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
+ allocated.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
+ heuristic is applied and the generation stops when it is very unlikely to find better candidates;
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
+ beam search algorithm).
+ num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
+ The number of beam hypotheses that shall be returned upon calling
+ [`~transformers.BeamSearchScorer.finalize`].
+ num_beam_groups (`int`, *optional*, defaults to 1):
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+ max_length (`int`, *optional*):
+ The maximum length of the sequence to be generated.
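+
+ Examples:
+
+ ```python
+ >>> # A condensed, illustrative sketch (checkpoint and forced phrase are only examples): constrained
+ >>> # decoding is usually reached through `generate()`, which builds this scorer when constraints are given.
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+
+ >>> inputs = tokenizer("translate English to German: How old are you?", return_tensors="pt")
+ >>> force_ids = tokenizer("Sie", add_special_tokens=False).input_ids
+ >>> constraints = [PhrasalConstraint(token_ids=force_ids)]
+ >>> outputs = model.generate(**inputs, num_beams=4, constraints=constraints)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+ ```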
+ """ + + def __init__( + self, + batch_size: int, + num_beams: int, + constraints: List[Constraint], + device: torch.device, + length_penalty: Optional[float] = 1.0, + do_early_stopping: Optional[Union[bool, str]] = False, + num_beam_hyps_to_keep: Optional[int] = 1, + num_beam_groups: Optional[int] = 1, + max_length: Optional[int] = None, + ): + self.num_beams = num_beams + self.device = device + self.length_penalty = length_penalty + self.do_early_stopping = do_early_stopping + self.num_beam_hyps_to_keep = num_beam_hyps_to_keep + self.num_beam_groups = num_beam_groups + self.group_size = self.num_beams // self.num_beam_groups + self.constraints = constraints + + self._is_init = False + self._beam_hyps = [ + BeamHypotheses( + num_beams=self.num_beams, + length_penalty=self.length_penalty, + early_stopping=self.do_early_stopping, + max_length=max_length, + ) + for _ in range(batch_size) + ] + self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device) + + if not isinstance(num_beams, int) or num_beams <= 1: + raise ValueError( + f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," + " one should make use of `greedy_search` instead." + ) + + if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): + raise ValueError( + "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" + f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." + ) + + @property + def is_done(self) -> bool: + return self._done.all() + + def make_constraint_states(self, n): + return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)] + + def check_completes_constraints(self, sequence): + new_state = self.make_constraint_states(1)[0] + new_state.reset(sequence) + return new_state.completed + + def process( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + scores_for_all_vocab: torch.FloatTensor, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, + ) -> Tuple[torch.Tensor]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): + Current scores of the top `2 * num_beams` non-finished beam hypotheses. + next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. + next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + Beam indices indicating to which beam hypothesis the `next_tokens` correspond. + scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`): + The scores of all tokens in the vocabulary for each of the beam hypotheses. + pad_token_id (`int`, *optional*): + The id of the *padding* token. 
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ beam_indices (`torch.LongTensor`, *optional*):
+ Beam indices indicating to which beam hypothesis each token corresponds.
+ decoder_prompt_len (`int`, *optional*):
+ The length of the prompt that is included in the input to the decoder.
+
+ Return:
+ `UserDict`: A dictionary composed of the fields as defined above:
+
+ - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
+ non-finished beams.
+ - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
+ to the non-finished beam_hypotheses.
+ - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
+ indicating to which beam the next tokens shall be added.
+ """
+
+ # add up to the length which the next_scores is calculated on (including decoder prompt)
+ cur_len = input_ids.shape[-1] + 1
+ batch_size = len(self._beam_hyps)
+ if not (batch_size == (input_ids.shape[0] // self.group_size)):
+ if self.num_beam_groups > 1:
+ raise ValueError(
+ f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
+ f"size of {self.group_size} is expected by the beam scorer."
+ )
+ else:
+ raise ValueError(
+ f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
+ f"{self.group_size} is expected by the beam scorer."
+ )
+
+ device = input_ids.device
+
+ next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
+ next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
+ next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
+ if self._done[batch_idx]:
+ if self.num_beams < len(beam_hyp):
+ raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
+ if eos_token_id is None or pad_token_id is None:
+ raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
+ # pad the batch
+ next_beam_scores[batch_idx, :] = 0
+ next_beam_tokens[batch_idx, :] = pad_token_id
+ next_beam_indices[batch_idx, :] = 0
+ continue
+
+ # next tokens for this sentence.
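+ # Candidates arrive ranked by score. An EOS candidate is only finalized as a hypothesis if its full
+ # sequence satisfies every constraint (via `check_completes_constraints`); all other candidates fill
+ # the next beam until the `group_size` slots are taken.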
+ beam_idx = 0
+ for beam_token_rank, (next_token, next_score, next_index) in enumerate(
+ zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
+ ):
+ batch_beam_idx = batch_idx * self.group_size + next_index
+ # add to generated hypotheses if end of sentence
+ if (eos_token_id is not None) and (next_token.item() in eos_token_id):
+ # if beam_token does not belong to top num_beams tokens, it should not be added
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
+ if is_beam_token_worse_than_top_num_beams:
+ continue
+
+ completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist())
+ if completes_constraint:
+ if beam_indices is not None:
+ beam_index = beam_indices[batch_beam_idx]
+ beam_index = beam_index + (batch_beam_idx,)
+ else:
+ beam_index = None
+
+ beam_hyp.add(
+ input_ids[batch_beam_idx].clone(),
+ next_score.item(),
+ beam_indices=beam_index,
+ generated_len=cur_len - decoder_prompt_len,
+ )
+ else:
+ # add next predicted token since it is not eos_token
+ next_beam_scores[batch_idx, beam_idx] = next_score
+ next_beam_tokens[batch_idx, beam_idx] = next_token
+ next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
+ beam_idx += 1
+
+ # once the beam for next step is full, don't add more tokens to it.
+ if beam_idx == self.group_size:
+ break
+
+ new_scores, new_tokens, new_indices = self.step_sentence_constraint(
+ batch_idx,
+ input_ids,
+ scores_for_all_vocab,
+ next_beam_scores[batch_idx],
+ next_beam_tokens[batch_idx],
+ next_beam_indices[batch_idx],
+ )
+
+ next_beam_scores[batch_idx] = new_scores
+ next_beam_tokens[batch_idx] = new_tokens
+ next_beam_indices[batch_idx] = new_indices
+
+ if beam_idx < self.group_size:
+ raise ValueError(
+ f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
+ f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
+ )
+
+ # Check if we are done so that we can save a pad step if all(done)
+ self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
+ next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
+ )
+
+ return UserDict(
+ {
+ "next_beam_scores": next_beam_scores.view(-1),
+ "next_beam_tokens": next_beam_tokens.view(-1),
+ "next_beam_indices": next_beam_indices.view(-1),
+ }
+ )
+
+ def step_sentence_constraint(
+ self,
+ batch_idx: int,
+ input_ids: torch.LongTensor,
+ vocab_scores: torch.FloatTensor,
+ sent_beam_scores: torch.FloatTensor,
+ sent_beam_tokens: torch.LongTensor,
+ sent_beam_indices: torch.LongTensor,
+ push_progress: bool = False,
+ ):
+ # sent_beam_tokens are the next {num_beams} tokens that are under consideration for this beam
+ # (candidate next tokens)
+
+ # 1. Adding "advance_tokens"
+ # using ConstraintListState.advance(), we propose new tokens to be added into this "candidate list" that will
+ # advance us in fulfilling the constraints.
+
+ # 2. Selecting best candidates such that we end up with the most probable candidates
+ # that fulfill our constraints.
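+ # (The selection below ranks every candidate by its "bank" -- the number of constraint tokens already
+ # fulfilled -- before its score, so hypotheses that make more constraint progress are preferred.)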
+
+ orig_len = sent_beam_indices.size(0)
+ device = sent_beam_indices.device
+
+ # initialize states
+ topk_constraint_states = self.make_constraint_states(orig_len)
+ advance_constraint_states = self.make_constraint_states(orig_len)
+
+ sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len
+ this_batch_input_ids = input_ids[sidx:eidx]
+ this_batch_token_scores = vocab_scores[sidx:eidx]
+ full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1)
+
+ # need to make new hypotheses that advance the constraints
+ track_new = {
+ "new_seqs": full_hypotheses.tolist(),
+ "new_states": [],
+ "new_indices": [],
+ "new_tokens": [],
+ "new_scores": [],
+ }
+ for seq_idx, pre_seq in enumerate(this_batch_input_ids):
+ # pre_seq = ith sequence generated before this step.
+
+ # input_ids -> (topk) generic beam search best model next tokens
+ # -> (advance) constraints forcing the next token
+ # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of
+ # hypotheses.
+
+ topk_state = topk_constraint_states[seq_idx]
+ topk_state.reset(full_hypotheses[seq_idx].cpu().tolist())
+
+ advance_state = advance_constraint_states[seq_idx]
+ advance_state.reset(pre_seq.cpu().tolist())
+
+ if not advance_state.completed:
+ advance_tokens = torch.LongTensor(advance_state.advance()).to(device)
+ for advance_token in advance_tokens:
+ # since adding each `advance_token` leads to a different hypothesis, create new state instance.
+ new_state = advance_state.copy(stateful=True)
+ new_state.add(advance_token.cpu().tolist())
+
+ advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist()
+ if advance_seq not in track_new["new_seqs"]:
+ # prevent duplicates, which are basically bound to happen in this process.
+ track_new["new_seqs"].append(advance_seq)
+ track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches
+ track_new["new_tokens"].append(advance_token)
+ track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token))
+ track_new["new_states"].append(new_state)
+ elif push_progress:
+ # Basically, `sent_beam_indices` often selects only a few of the generated sequences in `input_ids`
+ # that actually fulfill our constraints. For example, let constraints == ["loves pies"] and
+
+ # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and"
+
+ # Without this step, if `sent_beam_indices` is something like [1,1], then
+ # 1. `pre_seq_1` won't be added to the list of (topk) hypotheses since it's not in the indices and
+ # 2. it won't be added to the list of (advance) hypotheses since it's completed already. (this is
+ # the else part of `if constraints_completed[seq_idx]`)
+ # 3. it ends up simply getting removed from consideration.
+
+ # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways,
+ # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam
+ # search times, since completed sequences keep getting removed after all this effort for constrained
+ # generation.
+
+ # Here, we basically take `pre_seq_1` and "push" it into the considered list of hypotheses by simply
+ # appending the next likely token in the vocabulary and adding it to the list of hypotheses.
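+ # Note: `advance_state.reset(advance_seq)` below replays the full pushed sequence through the
+ # constraints, so the stored state stays consistent with the (typically already-completed) hypothesis.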
+
+ new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token
+ advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1)
+
+ advance_state = advance_constraint_states[seq_idx]
+
+ advance_seq = advance_seq.cpu().tolist()
+
+ advance_state.reset(advance_seq)
+ if advance_seq not in track_new["new_seqs"]:
+ # but still don't want to have duplicates
+ track_new["new_seqs"].append(advance_seq)
+ track_new["new_indices"].append(seq_idx)
+ track_new["new_tokens"].append(new_token)
+ track_new["new_scores"].append(new_score)
+ track_new["new_states"].append(advance_state)
+
+ if len(track_new["new_indices"]) > 0:
+ new_indices = torch.tensor(track_new["new_indices"]).to(device)
+ new_tokens = torch.stack(track_new["new_tokens"]).to(device)
+ new_scores = torch.stack(track_new["new_scores"]).to(device)
+
+ all_states = topk_constraint_states + track_new["new_states"]
+ all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1)
+ all_scores = torch.cat((sent_beam_scores, new_scores), -1)
+ all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device)
+
+ zipped = all_banks * 100 + all_scores
+ indices = zipped.sort(descending=True).indices
+ sorted_banks = all_banks[indices]
+
+ # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0}
+
+ counter = -1
+ cur_bank = sorted_banks[0]
+ increments = []
+ for bank in sorted_banks:
+ if bank == cur_bank:
+ counter += 1
+ else:
+ counter = 0
+ cur_bank = bank
+ increments.append(counter)
+ rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
+
+ indices = indices[rearrangers][:orig_len]
+
+ sent_beam_scores = all_scores[indices]
+ sent_beam_tokens = all_tokens[indices]
+ sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices]
+
+ return sent_beam_scores, sent_beam_tokens, sent_beam_indices
+
+ def finalize(
+ self,
+ input_ids: torch.LongTensor,
+ final_beam_scores: torch.FloatTensor,
+ final_beam_tokens: torch.LongTensor,
+ final_beam_indices: torch.LongTensor,
+ max_length: int,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ beam_indices: Optional[torch.LongTensor] = None,
+ decoder_prompt_len: Optional[int] = 0,
+ ) -> Tuple[torch.LongTensor]:
+ batch_size = len(self._beam_hyps)
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+
+ # finalize all open beam hypotheses and add to generated hypotheses
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
+ if self._done[batch_idx]:
+ continue
+
+ # all open beam hypotheses are added to the beam hypothesis
+ # beam hypothesis class automatically keeps the best beams
+
+ ids_collect = []
+ for beam_id in range(self.num_beams):
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
+ final_score = final_beam_scores[batch_beam_idx].item()
+ final_tokens = input_ids[batch_beam_idx]
+
+ completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist())
+ if completes_constraint:
+ beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
+ beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
+ ids_collect.append(beam_id)
+
+ # due to overly complex constraints or other factors, sometimes we can't guarantee a successful
+ # generation. In these cases we simply return the highest scoring outputs.
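+ # Fallback: top up the pool with the best-scoring beams regardless of constraint satisfaction, so
+ # `finalize` can always return `num_beam_hyps_to_keep` hypotheses per batch entry.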
+ if len(ids_collect) < self.num_beam_hyps_to_keep: + for beam_id in range(self.num_beams): + if beam_id not in ids_collect: + batch_beam_idx = batch_idx * self.num_beams + beam_id + final_score = final_beam_scores[batch_beam_idx].item() + final_tokens = input_ids[batch_beam_idx] + generated_len = final_tokens.shape[-1] - decoder_prompt_len + beam_hyp.add(final_tokens, final_score, generated_len=generated_len) + if len(ids_collect) >= self.num_beam_hyps_to_keep: + break + + # select the best hypotheses + sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) + best = [] + best_indices = [] + best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) + + # retrieve best hypotheses + for i, beam_hyp in enumerate(self._beam_hyps): + sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0]) + for j in range(self.num_beam_hyps_to_keep): + best_hyp_tuple = sorted_hyps.pop() + best_score = best_hyp_tuple[0] + best_hyp = best_hyp_tuple[1] + best_index = best_hyp_tuple[2] + sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) + + # append to lists + best.append(best_hyp) + + # append indices to list + best_indices.append(best_index) + + best_scores[i * self.num_beam_hyps_to_keep + j] = best_score + + # prepare for adding eos + sent_lengths_max = sent_lengths.max().item() + 1 + + sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max + decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + + if len(best_indices) > 0 and best_indices[0] is not None: + indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + else: + indices = None + + # shorter batches are padded if needed + if sent_lengths.min().item() != sent_lengths.max().item(): + if pad_token_id is None: + raise ValueError("`pad_token_id` has to be defined") + decoded.fill_(pad_token_id) + + if indices is not None: + indices.fill_(-1) + + # fill with hypotheses and eos_token_id if the latter fits in + for i, (hypo, best_idx) in enumerate(zip(best, best_indices)): + decoded[i, : sent_lengths[i]] = hypo + + if indices is not None: + indices[i, : len(best_idx)] = torch.tensor(best_idx) + + if sent_lengths[i] < sent_max_len: + # inserting only the first eos_token_id + decoded[i, sent_lengths[i]] = eos_token_id[0] + + return UserDict( + { + "sequences": decoded, + "sequence_scores": best_scores, + "beam_indices": indices, + } + ) + + +class BeamHypotheses: + def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None): + """ + Initialize n-best list of hypotheses. + """ + self.length_penalty = length_penalty + self.early_stopping = early_stopping + self.max_length = max_length + self.num_beams = num_beams + self.beams = [] + self.worst_score = 1e9 + + if not isinstance(self.early_stopping, bool) and self.max_length is None: + raise ValueError( + "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the" + " BeamScorer class instance at initialization time." + ) + + def __len__(self): + """ + Number of hypotheses in the list. + """ + return len(self.beams) + + def add( + self, + hyp: torch.LongTensor, + sum_logprobs: float, + beam_indices: Optional[torch.LongTensor] = None, + generated_len: Optional[int] = None, + ): + """ + Add a new hypothesis to the list. 
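+
+ The hypothesis is scored as `sum_logprobs / generated_len**length_penalty` (falling back to
+ `hyp.shape[-1]` when `generated_len` is not given); only the best `num_beams` hypotheses are kept.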
+ """ + if generated_len is not None: + score = sum_logprobs / (generated_len**self.length_penalty) + # This 'else' case exists for retrocompatibility + else: + score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) + + if len(self) < self.num_beams or score > self.worst_score: + self.beams.append((score, hyp, beam_indices)) + if len(self) > self.num_beams: + sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)]) + del self.beams[sorted_next_scores[0][1]] + self.worst_score = sorted_next_scores[1][0] + else: + self.worst_score = min(score, self.worst_score) + + def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool: + """ + If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst + one in the heap, then we are done with this sentence. + """ + + if len(self) < self.num_beams: + return False + + # `True`: stop as soon as at least `num_beams` hypotheses are finished + if self.early_stopping is True: + return True + # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate + # when `length_penalty` is positive. See the discussion below for more details. + # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 + elif self.early_stopping is False: + highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty + ret = self.worst_score >= highest_attainable_score + return ret + # `"never"`: compute the best possible score, depending on the signal of `length_penalty` + else: + # `length_penalty` > 0.0 -> max denominator is obtaned from `max_length`, not from `cur_len` -> min + # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain + # its max this way + if self.length_penalty > 0.0: + if self.max_length <= decoder_prompt_len: + raise ValueError("max_length is not larger than decoder prompt length") + highest_attainable_score = ( + best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty + ) + # the opposite logic applies here (max `highest_attainable_score` from `cur_len`) + else: + highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty + ret = self.worst_score >= highest_attainable_score + return ret diff --git a/modified/generation/configuration_utils.py b/modified/generation/configuration_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4818ca8d97b7f192e2cb1a2a1a30b473b452db1d --- /dev/null +++ b/modified/generation/configuration_utils.py @@ -0,0 +1,977 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Generation configuration class and utilities.""" + +import copy +import json +import os +import warnings +from typing import Any, Dict, Optional, Union + +from .. 
import __version__
+from ..configuration_utils import PretrainedConfig
+from ..utils import (
+ GENERATION_CONFIG_NAME,
+ PushToHubMixin,
+ cached_file,
+ download_url,
+ extract_commit_hash,
+ is_remote_url,
+ logging,
+)
+
+
+logger = logging.get_logger(__name__)
+METADATA_FIELDS = ("_from_model_config", "_commit_hash", "_original_object_hash", "transformers_version")
+
+
+class GenerationConfig(PushToHubMixin):
+ # no-format
+ r"""
+ Class that holds a configuration for a generation task. A `generate` call supports the following generation methods
+ for text-decoder, text-to-text, speech-to-text, and vision-to-text models:
+
+ - *greedy decoding* by calling [`~generation.GenerationMixin.greedy_search`] if `num_beams=1` and
+ `do_sample=False`
+ - *contrastive search* by calling [`~generation.GenerationMixin.contrastive_search`] if `penalty_alpha>0.`
+ and `top_k>1`
+ - *multinomial sampling* by calling [`~generation.GenerationMixin.sample`] if `num_beams=1` and
+ `do_sample=True`
+ - *beam-search decoding* by calling [`~generation.GenerationMixin.beam_search`] if `num_beams>1` and
+ `do_sample=False`
+ - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin.beam_sample`] if
+ `num_beams>1` and `do_sample=True`
+ - *diverse beam-search decoding* by calling [`~generation.GenerationMixin.group_beam_search`], if
+ `num_beams>1` and `num_beam_groups>1`
+ - *constrained beam-search decoding* by calling [`~generation.GenerationMixin.constrained_beam_search`], if
+ `constraints!=None` or `force_words_ids!=None`
+ - *assisted decoding* by calling [`~generation.GenerationMixin.assisted_decoding`], if
+ `assistant_model` is passed to `.generate()`
+
+ You do not need to call any of the above methods directly. Pass custom parameter values to '.generate()'. To learn
+ more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
+
+
+
+ A large number of these flags control the logits or the stopping criteria of the generation. Make sure you check
+ the [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full
+ description of the possible manipulations, as well as examples of their usage.
+
+
+
+ Args:
+ > Parameters that control the length of the output
+
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
+ `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.
+ max_new_tokens (`int`, *optional*):
+ The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
+ min_length (`int`, *optional*, defaults to 0):
+ The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +
+ `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.
+ min_new_tokens (`int`, *optional*):
+ The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
+ early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
+ heuristic is applied and the generation stops when it is very unlikely to find better candidates;
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
+ beam search algorithm).
+ max_time (`float`, *optional*):
+ The maximum amount of time you allow the computation to run for, in seconds. Generation will still finish
+ the current pass after the allocated time has passed.
+
+ > Parameters that control the generation strategy used
+
+ do_sample (`bool`, *optional*, defaults to `False`):
+ Whether or not to use sampling; use greedy decoding otherwise.
+ num_beams (`int`, *optional*, defaults to 1):
+ Number of beams for beam search. 1 means no beam search.
+ num_beam_groups (`int`, *optional*, defaults to 1):
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+ penalty_alpha (`float`, *optional*):
+ The value balances the model confidence and the degeneration penalty in contrastive search decoding.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should use the past key/values attentions (if applicable to the model) to
+ speed up decoding.
+
+ > Parameters for manipulation of the model output logits
+
+ temperature (`float`, *optional*, defaults to 1.0):
+ The value used to modulate the next token probabilities.
+ top_k (`int`, *optional*, defaults to 50):
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
+ top_p (`float`, *optional*, defaults to 1.0):
+ If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
+ `top_p` or higher are kept for generation.
+ typical_p (`float`, *optional*, defaults to 1.0):
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
+ the expected conditional probability of predicting a random token next, given the partial text already
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
+ add up to `typical_p` or higher are kept for generation. See [this
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
+ epsilon_cutoff (`float`, *optional*, defaults to 0.0):
+ If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
+ `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the
+ size of the model. See [Truncation Sampling as Language Model
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
+ eta_cutoff (`float`, *optional*, defaults to 0.0):
+ Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between
+ 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *
+ exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token
+ probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,
+ depending on the size of the model. See [Truncation Sampling as Language Model
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
+ This value is subtracted from a beam's score if it generates a token that is the same as any token from any
+ beam of another group at a particular time. Note that `diversity_penalty` is only effective if `group beam
+ search` is enabled.
+ repetition_penalty (`float`, *optional*, defaults to 1.0):
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):
+ The parameter for `encoder_repetition_penalty`. An exponential penalty on sequences that are not in the
+ original input. 1.0 means no penalty.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0):
+ If set to int > 0, all ngrams of that size can only occur once.
+ bad_words_ids (`List[List[int]]`, *optional*):
+ List of list of token ids that are not allowed to be generated. Check
+ [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples.
+ force_words_ids (`List[List[int]]` or `List[List[List[int]]]`, *optional*):
+ List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of
+ words that must be included, the opposite of `bad_words_ids`. If given `List[List[List[int]]]`, this
+ triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one
+ can allow different forms of each word.
+ renormalize_logits (`bool`, *optional*, defaults to `False`):
+ Whether to renormalize the logits after applying all the logits processors or warpers (including the custom
+ ones). It's highly recommended to set this flag to `True` as the search algorithms assume the score logits
+ are normalized but some logit processors or warpers break the normalization.
+ constraints (`List[Constraint]`, *optional*):
+ Custom constraints that can be added to the generation to ensure that the output will contain the use of
+ certain tokens as defined by `Constraint` objects, in the most sensible way possible.
+ forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
+ language token.
+ forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`):
+ The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
+ list to set multiple *end-of-sequence* tokens.
+ remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
+ Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from
+ crashing. Note that using `remove_invalid_values` can slow down generation.
+ exponential_decay_length_penalty (`tuple(int, float)`, *optional*):
+ This tuple adds an exponentially increasing length penalty after a certain number of tokens have been
+ generated. The tuple shall consist of: `(start_index, decay_factor)`, where `start_index` indicates where
+ the penalty starts and `decay_factor` represents the factor of exponential decay.
+ suppress_tokens (`List[int]`, *optional*):
+ A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set their
+ log probs to `-inf` so that they are not sampled.
+ begin_suppress_tokens (`List[int]`, *optional*):
+ A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens` logit
+ processor will set their log probs to `-inf` so that they are not sampled.
+ forced_decoder_ids (`List[List[int]]`, *optional*):
+ A list of pairs of integers which indicates a mapping from generation indices to token indices that will be
+ forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token
+ of index 123.
+ sequence_bias (`Dict[Tuple[int], float]`, *optional*):
+ Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the
+ sequence being selected, while negative biases do the opposite. Check
+ [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples.
+ guidance_scale (`float`, *optional*):
+ The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
+ Higher guidance scale encourages the model to generate samples that are more closely linked to the input
+ prompt, usually at the expense of poorer quality.
+ low_memory (`bool`, *optional*):
+ Switch to sequential topk for contrastive search to reduce peak memory. Used with contrastive search.
+
+
+ > Parameters that define the output variables of `generate`
+
+ num_return_sequences (`int`, *optional*, defaults to 1):
+ The number of independently computed returned sequences for each element in the batch.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ > Special tokens that can be used at generation time
+
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ bos_token_id (`int`, *optional*):
+ The id of the *beginning-of-sequence* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+ > Generation parameters exclusive to encoder-decoder models
+
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
+ If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the
+ `decoder_input_ids`.
+ decoder_start_token_id (`int`, *optional*):
+ If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token.
+
+ > Generation parameters exclusive to [assistant generation](https://arxiv.org/abs/2211.17192)
+
+ num_assistant_tokens (`int`, *optional*, defaults to 5):
+ Defines the number of _speculative tokens_ that shall be generated by the assistant model before being
+ checked by the target model at each iteration. Higher values for `num_assistant_tokens` make the generation
+ more _speculative_: if the assistant model is performant, larger speed-ups can be reached; if the assistant
+ model requires many corrections, lower speed-ups are reached.
+
+ num_assistant_tokens_schedule (`str`, *optional*, defaults to `"heuristic"`):
+ Defines the schedule at which max assistant tokens shall be changed during inference.
+ - `"heuristic"`: when all _speculative_ tokens are correct, increase `num_assistant_tokens` by 2,
+ otherwise reduce by 1
+ - `"constant"`: `num_assistant_tokens` stays unchanged during generation
+
+ > Wild card
+
+ generation_kwargs:
+ Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not
+ present in `generate`'s signature will be used in the model forward pass.
+ """
+
+ def __init__(self, **kwargs):
+ # Parameters that control the length of the output
+ # if the default `max_length` is updated here, make sure to update the `generate` tests following https://github.com/huggingface/transformers/pull/25030
+ self.max_length = kwargs.pop("max_length", 20)
+ self.max_new_tokens = kwargs.pop("max_new_tokens", None)
+ self.min_length = kwargs.pop("min_length", 0)
+ self.min_new_tokens = kwargs.pop("min_new_tokens", None)
+ self.early_stopping = kwargs.pop("early_stopping", False)
+ self.max_time = kwargs.pop("max_time", None)
+
+ # Parameters that control the generation strategy used
+ self.do_sample = kwargs.pop("do_sample", False)
+ self.num_beams = kwargs.pop("num_beams", 1)
+ self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
+ self.penalty_alpha = kwargs.pop("penalty_alpha", None)
+ self.use_cache = kwargs.pop("use_cache", True)
+
+ # Parameters for manipulation of the model output logits
+ self.temperature = kwargs.pop("temperature", 1.0)
+ self.top_k = kwargs.pop("top_k", 50)
+ self.top_p = kwargs.pop("top_p", 1.0)
+ self.typical_p = kwargs.pop("typical_p", 1.0)
+ self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
+ self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
+ self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
+ self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
+ self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0)
+ self.length_penalty = kwargs.pop("length_penalty", 1.0)
+ self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
+ self.bad_words_ids = kwargs.pop("bad_words_ids", None)
+ self.force_words_ids = kwargs.pop("force_words_ids", None)
+ self.renormalize_logits = kwargs.pop("renormalize_logits", False)
+ self.constraints = kwargs.pop("constraints", None)
+ self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
+ self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
+ self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
+ self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
+ self.suppress_tokens = kwargs.pop("suppress_tokens", None)
+ self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
+ self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None)
+ self.sequence_bias = kwargs.pop("sequence_bias", None)
+ self.guidance_scale = kwargs.pop("guidance_scale", None)
+ self.low_memory = kwargs.pop("low_memory", None)
+
+ # Parameters that define the output variables of `generate`
+ self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
+ self.output_attentions = kwargs.pop("output_attentions", False)
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
+ self.output_scores = kwargs.pop("output_scores", False)
+ self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
+
+ # Special tokens that can be used at generation time
+
self.pad_token_id = kwargs.pop("pad_token_id", None) + self.bos_token_id = kwargs.pop("bos_token_id", None) + self.eos_token_id = kwargs.pop("eos_token_id", None) + + # Generation parameters exclusive to encoder-decoder models + self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0) + self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None) + + # Assistant generation + self.num_assistant_tokens = kwargs.pop("num_assistant_tokens", 5) + self.num_assistant_tokens_schedule = kwargs.pop("num_assistant_tokens_schedule", "heuristic") + + # Wild card + self.generation_kwargs = kwargs.pop("generation_kwargs", {}) + + # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub + # interface. + self._from_model_config = kwargs.pop("_from_model_config", False) + self._commit_hash = kwargs.pop("_commit_hash", None) + self.transformers_version = kwargs.pop("transformers_version", __version__) + + # Additional attributes without default values + if not self._from_model_config: + # we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a + # model's default configuration file + for key, value in kwargs.items(): + try: + setattr(self, key, value) + except AttributeError as err: + logger.error(f"Can't set {key} with value {value} for {self}") + raise err + + # Validate the values of the attributes + self.validate(is_init=True) + + def __hash__(self): + return hash(self.to_json_string(ignore_metadata=True)) + + def __eq__(self, other): + if not isinstance(other, GenerationConfig): + return False + + self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True) + other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True) + return self_without_metadata == other_without_metadata + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}" + + def validate(self, is_init=False): + """ + Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence + of parameterization that can be detected as incorrect from the configuration instance alone. + + Note that some parameters are best validated at generate runtime, as they may depend on other inputs and/or the + model, such as parameters related to the generation length. + """ + + # Validation of individual attributes + if self.early_stopping not in {True, False, "never"}: + raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.") + + # Validation of attribute relations: + fix_location = "" + if is_init: + fix_location = ( + " This was detected when initializing the generation config instance, which means the corresponding " + "file may hold incorrect parameterization and should be fixed." + ) + + # 1. detect sampling-only parameterization when not in sampling mode + if self.do_sample is False: + greedy_wrong_parameter_msg = ( + "`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only " + "used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`." 
+ + fix_location + ) + if self.temperature != 1.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="temperature", flag_value=self.temperature), + UserWarning, + ) + if self.top_p != 1.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p), + UserWarning, + ) + if self.typical_p != 1.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="typical_p", flag_value=self.typical_p), + UserWarning, + ) + if self.top_k != 50 and self.penalty_alpha is None: # contrastive search uses top_k + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="top_k", flag_value=self.top_k), + UserWarning, + ) + if self.epsilon_cutoff != 0.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="epsilon_cutoff", flag_value=self.epsilon_cutoff), + UserWarning, + ) + if self.eta_cutoff != 0.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="eta_cutoff", flag_value=self.eta_cutoff), + UserWarning, + ) + + # 2. detect beam-only parameterization when not in beam mode + if self.num_beams is None: + warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning) + self.num_beams = 1 + + if self.num_beams == 1: + single_beam_wrong_parameter_msg = ( + "`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used " + "in beam-based generation modes. You should set `num_beams>1` or unset `{flag_name}`." + fix_location + ) + if self.early_stopping is not False: + warnings.warn( + single_beam_wrong_parameter_msg.format(flag_name="early_stopping", flag_value=self.early_stopping), + UserWarning, + ) + if self.num_beam_groups != 1: + warnings.warn( + single_beam_wrong_parameter_msg.format( + flag_name="num_beam_groups", flag_value=self.num_beam_groups + ), + UserWarning, + ) + if self.diversity_penalty != 0.0: + warnings.warn( + single_beam_wrong_parameter_msg.format( + flag_name="diversity_penalty", flag_value=self.diversity_penalty + ), + UserWarning, + ) + if self.length_penalty != 1.0: + warnings.warn( + single_beam_wrong_parameter_msg.format(flag_name="length_penalty", flag_value=self.length_penalty), + UserWarning, + ) + if self.constraints is not None: + warnings.warn( + single_beam_wrong_parameter_msg.format(flag_name="constraints", flag_value=self.constraints), + UserWarning, + ) + + # 3. detect incorrect paramaterization specific to advanced beam modes + else: + # constrained beam search + if self.constraints is not None: + constrained_wrong_parameter_msg = ( + "`constraints` is not `None`, triggering constrained beam search. However, `{flag_name}` is set " + "to `{flag_value}`, which is incompatible with this generation mode. Set `constraints=None` or " + "unset `{flag_name}` to continue." + fix_location + ) + if self.do_sample is True: + raise ValueError( + constrained_wrong_parameter_msg.format(flag_name="do_sample", flag_value=self.do_sample) + ) + if self.num_beam_groups != 1: + raise ValueError( + constrained_wrong_parameter_msg.format( + flag_name="num_beam_groups", flag_value=self.num_beam_groups + ) + ) + # group beam search + if self.diversity_penalty != 0.0 or self.num_beam_groups != 1: + group_error_prefix = ( + "`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. 
In " + "this generation mode, " + ) + if self.do_sample is True: + raise ValueError(group_error_prefix + "`do_sample` must be set to `False`") + if self.num_beams % self.num_beam_groups != 0: + raise ValueError(group_error_prefix + "`num_beams` should be divisible by `num_beam_groups`") + if self.diversity_penalty == 0.0: + raise ValueError( + group_error_prefix + + "`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical." + ) + + # 4. check `num_return_sequences` + if self.num_return_sequences != 1: + if self.num_beams == 1: + if self.do_sample is False: + raise ValueError( + "Greedy methods without beam search do not support `num_return_sequences` different than 1 " + f"(got {self.num_return_sequences})." + ) + elif self.num_return_sequences > self.num_beams: + raise ValueError( + f"`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` " + f"({self.num_beams})." + ) + + # 5. check common issue: passing `generate` arguments inside the generation config + generate_arguments = ( + "logits_processor", + "stopping_criteria", + "prefix_allowed_tokens_fn", + "synced_gpus", + "assistant_model", + "streamer", + "negative_prompt_ids", + "negative_prompt_attention_mask", + ) + for arg in generate_arguments: + if hasattr(self, arg): + raise ValueError( + f"Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to " + "`generate()` (or a pipeline) directly." + ) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + config_file_name: Optional[Union[str, os.PathLike]] = None, + push_to_hub: bool = False, + **kwargs, + ): + r""" + Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the + [`~GenerationConfig.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): + Name of the generation configuration JSON file to be saved in `save_directory`. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + + # At save time, validate the instance -- if any warning/exception is thrown, we refuse to save the instance + try: + with warnings.catch_warnings(record=True) as caught_warnings: + self.validate() + for w in caught_warnings: + raise ValueError(w.message) + except ValueError as exc: + warnings.warn( + "The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. " + "Fix these issues to save the configuration. This warning will be raised to an exception in v4.34." + "\n\nThrown during validation:\n" + str(exc), + UserWarning, + ) + return + + use_auth_token = kwargs.pop("use_auth_token", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. 
Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME + + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + output_config_file = os.path.join(save_directory, config_file_name) + + self.to_json_file(output_config_file, use_diff=True) + logger.info(f"Configuration saved in {output_config_file}") + + if push_to_hub: + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=kwargs.get("token"), + ) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name: Union[str, os.PathLike], + config_file_name: Optional[Union[str, os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + **kwargs, + ) -> "GenerationConfig": + r""" + Instantiate a [`GenerationConfig`] from a generation configuration file. + + Args: + pretrained_model_name (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or + namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`. + config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): + Name of the generation configuration JSON file to be loaded from `pretrained_model_name`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if + they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/". 
+ + + + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final configuration object. + + If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a + dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the + part of `kwargs` which has not been used to update `config` and is otherwise ignored. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are configuration attributes will be used to override the loaded + values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled + by the `return_unused_kwargs` keyword parameter. + + Returns: + [`GenerationConfig`]: The configuration object instantiated from this pretrained model. + + Examples: + + ```python + >>> from transformers import GenerationConfig + + >>> # Download configuration from huggingface.co and cache. + >>> generation_config = GenerationConfig.from_pretrained("gpt2") + + >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')* + >>> generation_config.save_pretrained("./test/saved_model/") + >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/") + + >>> # You can also specify configuration names to your generation configuration file + >>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json") + >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json") + + >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation + >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored + >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained( + ... "gpt2", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True + ... ) + >>> generation_config.top_k + 1 + + >>> unused_kwargs + {'foo': False} + ```""" + config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME + + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + use_auth_token = kwargs.pop("use_auth_token", None) + subfolder = kwargs.pop("subfolder", "") + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + commit_hash = kwargs.pop("_commit_hash", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
+ ) + token = use_auth_token + + user_agent = {"file_type": "config", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + config_path = os.path.join(pretrained_model_name, config_file_name) + config_path = str(config_path) + + is_local = os.path.exists(config_path) + if os.path.isfile(os.path.join(subfolder, config_path)): + # Special case when config_path is a local file + resolved_config_file = config_path + is_local = True + elif is_remote_url(config_path): + configuration_file = config_path + resolved_config_file = download_url(config_path) + else: + configuration_file = config_file_name + try: + # Load from local folder or from cache or download from model Hub and cache + resolved_config_file = cached_file( + pretrained_model_name, + configuration_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _commit_hash=commit_hash, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to + # the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the same" + f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory" + f" containing a {configuration_file} file" + ) + + try: + # Load config dict + config_dict = cls._dict_from_json_file(resolved_config_file) + config_dict["_commit_hash"] = commit_hash + except (json.JSONDecodeError, UnicodeDecodeError): + raise EnvironmentError( + f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file." + ) + + if is_local: + logger.info(f"loading configuration file {resolved_config_file}") + else: + logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}") + + if kwargs.get("return_unused_kwargs") is True: + config, unused_kwargs = cls.from_dict(config_dict, **kwargs) + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config, unused_kwargs + else: + config = cls.from_dict(config_dict, **kwargs) + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config + + @classmethod + def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + @classmethod + def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig": + """ + Instantiates a [`GenerationConfig`] from a Python dictionary of parameters. + + Args: + config_dict (`Dict[str, Any]`): + Dictionary that will be used to instantiate the configuration object. + kwargs (`Dict[str, Any]`): + Additional parameters from which to initialize the configuration object. + + Returns: + [`GenerationConfig`]: The configuration object instantiated from those parameters. + """ + return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) + # Those arguments may be passed along for our internal telemetry. 
+ # We remove them so they don't appear in `return_unused_kwargs`. + kwargs.pop("_from_auto", None) + kwargs.pop("_from_pipeline", None) + # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update. + if "_commit_hash" in kwargs and "_commit_hash" in config_dict: + kwargs["_commit_hash"] = config_dict["_commit_hash"] + + # The line below allows model-specific config to be loaded as well through kwargs, with safety checks. + # See https://github.com/huggingface/transformers/pull/21269 + config = cls(**{**config_dict, **kwargs}) + unused_kwargs = config.update(**kwargs) + + logger.info(f"Generate config {config}") + if return_unused_kwargs: + return config, unused_kwargs + else: + return config + + def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None: + """ + Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None, + converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"* + string, which can then be stored in the json format. + """ + if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str): + d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1] + for value in d.values(): + if isinstance(value, dict): + self.dict_torch_dtype_to_str(value) + + def to_diff_dict(self) -> Dict[str, Any]: + """ + Removes all attributes from config which correspond to the default config attributes for better readability and + serializes to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, + """ + config_dict = self.to_dict() + + # get the default config dict + default_config_dict = GenerationConfig().to_dict() + + serializable_config_dict = {} + + # only serialize values that differ from the default config + for key, value in config_dict.items(): + if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]: + serializable_config_dict[key] = value + + self.dict_torch_dtype_to_str(serializable_config_dict) + return serializable_config_dict + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + + # Fields to ignore at serialization time + if "_commit_hash" in output: + del output["_commit_hash"] + if "_original_object_hash" in output: + del output["_original_object_hash"] + + # Transformers version when serializing this file + output["transformers_version"] = __version__ + + self.dict_torch_dtype_to_str(output) + return output + + def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str: + """ + Serializes this instance to a JSON string. + + Args: + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `GenerationConfig()` + is serialized to JSON string. + ignore_metadata (`bool`, *optional*, defaults to `False`): + Whether to ignore the metadata fields present in the instance + + Returns: + `str`: String containing all the attributes that make up this configuration instance in JSON format. 
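+
+        Example (a minimal sketch; assumes a standard `transformers` install):
+
+        ```python
+        >>> from transformers import GenerationConfig
+
+        >>> config = GenerationConfig(do_sample=True, top_p=0.9)
+        >>> # with `use_diff=True`, only attributes that differ from the defaults
+        >>> # (plus `transformers_version`) appear in the JSON string
+        >>> json_string = config.to_json_string(use_diff=True)
+        ```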
+ """ + if use_diff is True: + config_dict = self.to_diff_dict() + else: + config_dict = self.to_dict() + + if ignore_metadata: + for metadata_field in METADATA_FIELDS: + config_dict.pop(metadata_field, None) + + return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this configuration instance's parameters will be saved. + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `GenerationConfig()` + is serialized to JSON file. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string(use_diff=use_diff)) + + @classmethod + def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig": + """ + Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy + [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`]. + + Args: + model_config (`PretrainedConfig`): + The model config that will be used to instantiate the generation config. + + Returns: + [`GenerationConfig`]: The configuration object instantiated from those parameters. + """ + config_dict = model_config.to_dict() + config_dict.pop("_from_model_config", None) + config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True) + + # Special case: some models have generation attributes set in the decoder. Use them if still unset in the + # generation config. + for decoder_name in ("decoder", "generator", "text_config"): + if decoder_name in config_dict: + default_generation_config = GenerationConfig() + decoder_config = config_dict[decoder_name] + for attr in config.to_dict().keys(): + if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr): + setattr(config, attr, decoder_config[attr]) + + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config + + def update(self, **kwargs): + """ + Updates attributes of this class instance with attributes from `kwargs` if they match existing atributtes, + returning all the unused kwargs. + + Args: + kwargs (`Dict[str, Any]`): + Dictionary of attributes to tentatively update this class. + + Returns: + `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. + """ + to_remove = [] + for key, value in kwargs.items(): + if hasattr(self, key): + setattr(self, key, value) + to_remove.append(key) + + # remove all the attributes that were updated, without modifying the input dict + unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} + return unused_kwargs diff --git a/modified/generation/flax_logits_process.py b/modified/generation/flax_logits_process.py new file mode 100644 index 0000000000000000000000000000000000000000..5c30b92755a4261654a7b7c930d07c0c6859c4a5 --- /dev/null +++ b/modified/generation/flax_logits_process.py @@ -0,0 +1,457 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect + +import jax +import jax.lax as lax +import jax.numpy as jnp + +from ..utils import add_start_docstrings +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" + Args: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam + search or log softmax for each vocabulary token when using beam search + kwargs (`Dict[str, Any]`, *optional*): + Additional logits processor specific kwargs. + + Return: + `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. + +""" + + +class FlaxLogitsProcessor: + """Abstract base class for all logit processors that can be applied during generation.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: + """Flax method for processing logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class FlaxLogitsWarper: + """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: + """Flax method for warping logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class FlaxLogitsProcessorList(list): + """ + This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process + a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each + [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs. + """ + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray: + for processor in self: + function_args = inspect.signature(processor.__call__).parameters + if len(function_args) > 3: + if not all(arg in kwargs for arg in list(function_args.keys())[2:]): + raise ValueError( + f"Make sure that all the required parameters: {list(function_args.keys())} for " + f"{processor.__class__} are passed to the logits processor." + ) + scores = processor(input_ids, scores, cur_len, **kwargs) + else: + scores = processor(input_ids, scores, cur_len) + return scores + + +class FlaxTemperatureLogitsWarper(FlaxLogitsWarper): + r""" + [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution). 
+
+    Args:
+        temperature (`float`):
+            The value used to modulate the logits distribution.
+    """
+
+    def __init__(self, temperature: float):
+        if not isinstance(temperature, float) or not (temperature > 0):
+            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
+
+        self.temperature = temperature
+
+    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+        scores = scores / self.temperature
+        return scores
+
+
+class FlaxTopPLogitsWarper(FlaxLogitsWarper):
+    """
+    [`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting generation to the smallest set of most
+    probable tokens whose cumulative probability adds up to at least `top_p`.
+
+    Args:
+        top_p (`float`):
+            If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+            higher are kept for generation.
+        filter_value (`float`, *optional*, defaults to -inf):
+            All filtered values will be set to this float value.
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens that cannot be filtered.
+    """
+
+    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
+            raise ValueError(f"`top_p` has to be a float in the interval [0, 1], but is {top_p}")
+        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
+
+        self.top_p = top_p
+        self.filter_value = filter_value
+        self.min_tokens_to_keep = min_tokens_to_keep
+
+    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
+
+        mask_scores = jnp.full_like(scores, self.filter_value)
+        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
+        score_mask = cumulative_probs < self.top_p
+
+        # also include the first token that pushes the cumulative probability above top_p
+        score_mask = jnp.roll(score_mask, 1)
+        score_mask |= score_mask.at[:, 0].set(True)
+
+        # min tokens to keep
+        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
+
+        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
+        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
+
+        return next_scores
+
+
+class FlaxTopKLogitsWarper(FlaxLogitsWarper):
+    r"""
+    [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
+
+    Args:
+        top_k (`int`):
+            The number of highest probability vocabulary tokens to keep for top-k-filtering.
+        filter_value (`float`, *optional*, defaults to -inf):
+            All filtered values will be set to this float value.
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens that cannot be filtered.
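+
+    Example (a minimal sketch; assumes a Flax-enabled `transformers` install):
+
+    ```python
+    >>> import jax.numpy as jnp
+    >>> from transformers import FlaxTopKLogitsWarper
+
+    >>> warper = FlaxTopKLogitsWarper(top_k=2)
+    >>> scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
+    >>> # `input_ids` is unused by this warper, so `None` suffices here; all but
+    >>> # the two highest-scoring tokens are pushed down to `filter_value`
+    >>> filtered = warper(None, scores, cur_len=1)
+    ```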
+ """ + + def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") + + self.top_k = max(top_k, min_tokens_to_keep) + self.filter_value = filter_value + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + batch_size, vocab_size = scores.shape + next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value) + + topk = min(self.top_k, scores.shape[-1]) # Safety check + topk_scores, topk_indices = lax.top_k(scores, topk) + shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten() + topk_scores_flat = topk_scores.flatten() + topk_indices_flat = topk_indices.flatten() + shift + + next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat) + next_scores = next_scores_flat.reshape(batch_size, vocab_size) + return next_scores + + +class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token. + + Args: + bos_token_id (`int`): + The id of the token to force as the first generated token. + """ + + def __init__(self, bos_token_id: int): + self.bos_token_id = bos_token_id + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + new_scores = jnp.full(scores.shape, -float("inf")) + + apply_penalty = 1 - jnp.bool_(cur_len - 1) + + scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores) + + return scores + + +class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. + + Args: + max_length (`int`): + The maximum length of the sequence to be generated. + eos_token_id (`int`): + The id of the token to force as the last generated token when `max_length` is reached. + """ + + def __init__(self, max_length: int, eos_token_id: int): + self.max_length = max_length + self.eos_token_id = eos_token_id + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + new_scores = jnp.full(scores.shape, -float("inf")) + + apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1) + + scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores) + + return scores + + +class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. + + Args: + min_length (`int`): + The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. + eos_token_id (`int`): + The id of the *end-of-sequence* token. 
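+
+    Example (a minimal sketch; assumes a Flax-enabled `transformers` install):
+
+    ```python
+    >>> import jax.numpy as jnp
+    >>> from transformers import FlaxMinLengthLogitsProcessor
+
+    >>> processor = FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2)
+    >>> scores = jnp.zeros((1, 10))
+    >>> # before `min_length` is reached, the EOS column is masked to -inf
+    >>> masked = processor(None, scores, cur_len=3)
+    ```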
+    """
+
+    def __init__(self, min_length: int, eos_token_id: int):
+        if not isinstance(min_length, int) or min_length < 0:
+            raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}")
+
+        if not isinstance(eos_token_id, int) or eos_token_id < 0:
+            raise ValueError(f"`eos_token_id` has to be a non-negative integer, but is {eos_token_id}")
+
+        self.min_length = min_length
+        self.eos_token_id = eos_token_id
+
+    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+        # create boolean flag to decide if min length penalty should be applied
+        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
+
+        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
+
+        return scores
+
+
+class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
+    r"""
+    [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using
+    `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the
+    beginning of the generation.
+
+    Args:
+        begin_suppress_tokens (`List[int]`):
+            Tokens to not sample.
+        begin_index (`int`):
+            Index where the tokens are suppressed.
+    """
+
+    def __init__(self, begin_suppress_tokens, begin_index):
+        self.begin_suppress_tokens = list(begin_suppress_tokens)
+        self.begin_index = begin_index
+
+    def __call__(self, input_ids, scores, cur_len: int):
+        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
+
+        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
+
+        return scores
+
+
+class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
+    r"""
+    [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
+    to be `-inf` so they are not sampled.
+
+    Args:
+        suppress_tokens (`list`):
+            Tokens to not sample.
+    """
+
+    def __init__(self, suppress_tokens: list):
+        self.suppress_tokens = list(suppress_tokens)
+
+    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
+
+        return scores
+
+
+class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
+    r"""
+    [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to
+    token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens
+    to `-inf` so that they are sampled at their corresponding index.
+
+    Args:
+        force_token_map (`list`):
+            Map giving token ids and indices where they will be forced to be sampled.
+    """
+
+    def __init__(self, force_token_map):
+        force_token_map = dict(force_token_map)
+        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
+        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
+        # Indices without forced tokens will have a negative value.
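+        # For example, `force_token_map={1: 50259, 2: 50359}` becomes the array
+        # `[-1, 50259, 50359]`: generation step 0 is left unconstrained (-1),
+        # while steps 1 and 2 each force a single token id.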
+ force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1 + for index, token in force_token_map.items(): + if token is not None: + force_token_array = force_token_array.at[index].set(token) + self.force_token_array = jnp.int32(force_token_array) + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + def _force_token(generation_idx): + batch_size = scores.shape[0] + current_token = self.force_token_array[generation_idx] + + new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf") + updates = jnp.zeros((batch_size, 1), dtype=scores.dtype) + new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token)) + return new_scores + + scores = lax.cond( + cur_len >= self.force_token_array.shape[0], + # If the current length is geq than the length of force_token_array, the processor does nothing. + lambda: scores, + # Otherwise, it may force a certain token. + lambda: lax.cond( + self.force_token_array[cur_len] >= 0, + # Only valid (positive) tokens are forced + lambda: _force_token(cur_len), + # Otherwise, the processor does nothing. + lambda: scores, + ), + ) + return scores + + +class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor): + r""" + Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log + probs to `inf` so that they are sampled at their corresponding index. + + Args: + generate_config (`GenerateConfig`): + The generate config used to generate the output. The following parameters are required: + eos_token_id (`int`, *optional*, defaults to 50257): + The id of the *end-of-sequence* token. + no_timestamps_token_id (`int`, *optional*, defaults to 50363): + The id of the `"<|notimestamps|>"` token. + max_initial_timestamp_index (`int`, *optional*, defaults to 1): + Used to set the maximum value of the initial timestamp. This is used to prevent the model from + predicting timestamps that are too far in the future. 
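+
+    Example (an illustrative sketch; the `SimpleNamespace` stand-ins below replace real Whisper generation and
+    model configs, and the token ids are only placeholders):
+
+    ```python
+    >>> from types import SimpleNamespace
+    >>> from transformers.generation.flax_logits_process import FlaxWhisperTimeStampLogitsProcessor
+
+    >>> gen_cfg = SimpleNamespace(
+    ...     eos_token_id=50257, no_timestamps_token_id=50363, is_multilingual=False, max_initial_timestamp_index=1
+    ... )
+    >>> model_cfg = SimpleNamespace(vocab_size=51864)
+    >>> processor = FlaxWhisperTimeStampLogitsProcessor(gen_cfg, model_cfg, decoder_input_length=1)
+    ```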
+ """ + + def __init__(self, generate_config, model_config, decoder_input_length): + self.eos_token_id = generate_config.eos_token_id + self.no_timestamps_token_id = generate_config.no_timestamps_token_id + self.timestamp_begin = generate_config.no_timestamps_token_id + 1 + + self.begin_index = decoder_input_length + 1 + + if generate_config.is_multilingual: + # room for language token and task token + self.begin_index += 2 + if hasattr(generate_config, "max_initial_timestamp_index"): + self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index + else: + self.max_initial_timestamp_index = model_config.vocab_size + if self.max_initial_timestamp_index is None: + self.max_initial_timestamp_index = model_config.vocab_size + + def __call__(self, input_ids, scores, cur_len): + # suppress <|notimestamps|> which is handled by without_timestamps + scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf")) + + def handle_pairs(input_ids_k, scores_k): + last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False) + last_was_timestamp = jnp.where( + input_ids_k[cur_len - 1] >= self.timestamp_begin, + True and last_was_timestamp, + False, + ) + + penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False) + penultimate_was_timestamp = jnp.where( + input_ids_k[cur_len - 2] >= self.timestamp_begin, + True, + penultimate_was_timestamp, + ) + + return jnp.where( + last_was_timestamp, + jnp.where( + penultimate_was_timestamp > 0, + scores_k.at[self.timestamp_begin :].set(-float("inf")), + scores_k.at[: self.eos_token_id].set(-float("inf")), + ), + scores_k, + ) + + scores = jax.vmap(handle_pairs)(input_ids, scores) + + apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False) + apply_max_initial_timestamp = jnp.where( + self.max_initial_timestamp_index is not None, + True and apply_max_initial_timestamp, + False, + ) + + last_allowed = self.timestamp_begin + self.max_initial_timestamp_index + + scores = jnp.where( + apply_max_initial_timestamp, + scores.at[:, last_allowed + 1 :].set(-float("inf")), + scores, + ) + + # if sum of probability over timestamps is above any other token, sample timestamp + logprobs = jax.nn.log_softmax(scores, axis=-1) + + def handle_cumulative_probs(logprobs_k, scores_k): + timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1) + max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin]) + return jnp.where( + timestamp_logprob > max_text_token_logprob, + scores_k.at[: self.timestamp_begin].set(-float("inf")), + scores_k, + ) + + scores = jax.vmap(handle_cumulative_probs)(logprobs, scores) + + return scores diff --git a/modified/generation/flax_utils.py b/modified/generation/flax_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4fce8970f8647c7902a203fd7a176b17ac68660e --- /dev/null +++ b/modified/generation/flax_utils.py @@ -0,0 +1,1020 @@ +# coding=utf-8 +# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import copy +import inspect +import warnings +from functools import partial +from typing import Any, Dict, Optional, Union + +import flax +import jax +import jax.numpy as jnp +import numpy as np +from jax import lax + +from ..models.auto import ( + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, +) +from ..utils import ModelOutput, logging +from .configuration_utils import GenerationConfig +from .flax_logits_process import ( + FlaxForcedBOSTokenLogitsProcessor, + FlaxForcedEOSTokenLogitsProcessor, + FlaxForceTokensLogitsProcessor, + FlaxLogitsProcessorList, + FlaxMinLengthLogitsProcessor, + FlaxSuppressTokensAtBeginLogitsProcessor, + FlaxSuppressTokensLogitsProcessor, + FlaxTemperatureLogitsWarper, + FlaxTopKLogitsWarper, + FlaxTopPLogitsWarper, +) + + +logger = logging.get_logger(__name__) + + +@flax.struct.dataclass +class FlaxGreedySearchOutput(ModelOutput): + """ + Flax Base class for outputs of decoder-only generation models using greedy search. + + + Args: + sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): + The generated sequences. + """ + + sequences: jnp.ndarray = None + + +@flax.struct.dataclass +class FlaxSampleOutput(ModelOutput): + """ + Flax Base class for outputs of decoder-only generation models using sampling. + + + Args: + sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): + The generated sequences. + """ + + sequences: jnp.ndarray = None + + +@flax.struct.dataclass +class FlaxBeamSearchOutput(ModelOutput): + """ + Flax Base class for outputs of decoder-only generation models using greedy search. + + + Args: + sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): + The generated sequences. + scores (`jnp.ndarray` of shape `(batch_size,)`): + The scores (log probabilities) of the generated sequences. + """ + + sequences: jnp.ndarray = None + scores: jnp.ndarray = None + + +@flax.struct.dataclass +class GreedyState: + cur_len: jnp.ndarray + sequences: jnp.ndarray + running_token: jnp.ndarray + is_sent_finished: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + + +@flax.struct.dataclass +class SampleState: + cur_len: jnp.ndarray + sequences: jnp.ndarray + running_token: jnp.ndarray + is_sent_finished: jnp.ndarray + prng_key: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + + +@flax.struct.dataclass +class BeamSearchState: + cur_len: jnp.ndarray + running_sequences: jnp.ndarray + running_scores: jnp.ndarray + sequences: jnp.ndarray + scores: jnp.ndarray + is_sent_finished: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + + +class FlaxGenerationMixin: + """ + A class containing all functions for auto-regressive text generation, to be used as a mixin in + [`FlaxPreTrainedModel`]. 
+ + The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for: + - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and + `do_sample=False` + - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and + `do_sample=False` + + You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To + learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). + """ + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." + ) + + @staticmethod + def _run_loop_in_debug(cond_fn, body_fn, init_state): + """ + Run generation in untraced mode. This should only be used for debugging purposes. + """ + state = init_state + while cond_fn(state): + state = body_fn(state) + return state + + def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs): + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not (argument.startswith("decoder_") or argument.startswith("cross_attn")) + } + model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs) + return model_kwargs + + def _prepare_decoder_input_ids_for_generation( + self, + batch_size: int, + decoder_start_token_id: int = None, + bos_token_id: int = None, + model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, + ) -> jnp.ndarray: + if model_kwargs is not None and "decoder_input_ids" in model_kwargs: + # Only use this arg if not None, otherwise just remove from model_kwargs + decoder_input_ids = model_kwargs.pop("decoder_input_ids") + if decoder_input_ids is not None: + return decoder_input_ids + decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0) + + def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: + # retrieve decoder_start_token_id for encoder-decoder models + # fall back to bos_token_id if necessary + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id is not None + else self.generation_config.decoder_start_token_id + ) + bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id + if decoder_start_token_id is not None: + return decoder_start_token_id + elif ( + hasattr(self.config, "decoder") + and hasattr(self.config.decoder, "decoder_start_token_id") + and self.config.decoder.decoder_start_token_id is not None + ): + return self.config.decoder.decoder_start_token_id + elif bos_token_id is not None: + return bos_token_id + elif ( + hasattr(self.config, "decoder") + and hasattr(self.config.decoder, "bos_token_id") + and self.config.decoder.bos_token_id is not None + ): + return self.config.decoder.bos_token_id + raise ValueError( + "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." 
+        )
+
+    @staticmethod
+    def _expand_to_num_beams(tensor, num_beams):
+        return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
+
+    def _adapt_logits_for_beam_search(self, logits):
+        """
+        This function can be overwritten in the specific modeling_flax_<model_name>.py classes to allow for custom beam
+        search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
+        """
+        return logits
+
+    def _validate_model_class(self):
+        """
+        Confirms that the model class is compatible with generation. If not, raises an exception that points to the
+        right class to use.
+        """
+        if not self.can_generate():
+            generate_compatible_mappings = [
+                FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+                FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
+                FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+            ]
+            generate_compatible_classes = set()
+            for model_mapping in generate_compatible_mappings:
+                supported_models = model_mapping.get(type(self.config), default=None)
+                if supported_models is not None:
+                    generate_compatible_classes.add(supported_models.__name__)
+            exception_message = (
+                f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
+                "it doesn't have a language model head."
+            )
+            if generate_compatible_classes:
+                exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
+            raise TypeError(exception_message)
+
+    def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
+        """Validates model kwargs for generation. Generate argument typos will also be caught here."""
+        unused_model_args = []
+        model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
+        # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
+        # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
+        if "kwargs" in model_args or "model_kwargs" in model_args:
+            model_args |= set(inspect.signature(self.__call__).parameters)
+        for key, value in model_kwargs.items():
+            if value is not None and key not in model_args:
+                unused_model_args.append(key)
+
+        if unused_model_args:
+            raise ValueError(
+                f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
+                " generate arguments will also show up in this list)"
+            )
+
+    def generate(
+        self,
+        input_ids: jnp.ndarray,
+        generation_config: Optional[GenerationConfig] = None,
+        prng_key: Optional[jnp.ndarray] = None,
+        trace: bool = True,
+        params: Optional[Dict[str, jnp.ndarray]] = None,
+        logits_processor: Optional[FlaxLogitsProcessorList] = None,
+        **kwargs,
+    ):
+        r"""
+        Generates sequences of token ids for models with a language modeling head.
+
+        Parameters:
+            input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+                The sequence used as a prompt for the generation.
+            generation_config (`~generation.GenerationConfig`, *optional*):
+                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+                passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+                default values, whose documentation should be checked to parameterize generation.
+            
+ trace (`bool`, *optional*, defaults to `True`): + Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a + considerably slower runtime. + params (`Dict[str, jnp.ndarray]`, *optional*): + Optionally the model parameters can be passed. Can be useful for parallelized generation. + logits_processor (`FlaxLogitsProcessorList `, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder + specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. + + Return: + [`~utils.ModelOutput`]. + + """ + # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call + self._validate_model_class() + + # priority: `generation_config` argument > `model.generation_config` (the default generation config) + if generation_config is None: + # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior, + # two conditions must be met + # 1) the generation config must have been created from the model config (`_from_model_config` field); + # 2) the generation config must have seen no modification since its creation (the hash is the same). + if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash( + self.generation_config + ): + new_generation_config = GenerationConfig.from_model_config(self.config) + if new_generation_config != self.generation_config: + warnings.warn( + "You have modified the pretrained model configuration to control generation. This is a" + " deprecated strategy to control generation and will be removed soon, in a future version." + " Please use and modify the model generation configuration (see" + " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )" + ) + self.generation_config = new_generation_config + generation_config = self.generation_config + + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + generation_config.validate() + self._validate_model_kwargs(model_kwargs.copy()) + + logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList() + + # set init values + prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) + + if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: + if model_kwargs.get("attention_mask") is None: + logger.warning( + "The attention mask and the pad token id were not set. As a consequence, you may observe " + "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." 
+                )
+            eos_token_id = generation_config.eos_token_id
+            if isinstance(eos_token_id, list):
+                eos_token_id = eos_token_id[0]
+            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-ended generation.")
+            generation_config.pad_token_id = eos_token_id
+
+        if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
+            raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
+
+        # decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
+        if not self.config.is_encoder_decoder and not trace:
+            if (
+                generation_config.pad_token_id is not None
+                and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
+            ):
+                logger.warning(
+                    "A decoder-only architecture is being used, but right-padding was detected! For correct "
+                    "generation results, please set `padding_side='left'` when initializing the tokenizer."
+                )
+
+        batch_size = input_ids.shape[0]
+
+        if self.config.is_encoder_decoder:
+            # add encoder_outputs to model_kwargs
+            if model_kwargs.get("encoder_outputs") is None:
+                model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
+            # prepare decoder_input_ids for generation
+            input_ids = self._prepare_decoder_input_ids_for_generation(
+                batch_size,
+                decoder_start_token_id=generation_config.decoder_start_token_id,
+                bos_token_id=generation_config.bos_token_id,
+                model_kwargs=model_kwargs,
+            )
+
+        # Prepare `max_length` depending on other stopping criteria.
+        input_ids_seq_length = input_ids.shape[-1]
+        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+        if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
+            # 20 is the default max_length of the generation config
+            warnings.warn(
+                f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
+                "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.",
+                UserWarning,
+            )
+        elif generation_config.max_new_tokens is not None:
+            if not has_default_max_length and generation_config.max_length is not None:
+                logger.warning(
+                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
+                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+                    "Please refer to the documentation for more information. "
+                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
+                )
+            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
+
+        if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
+            raise ValueError(
+                f"Infeasible length constraints: the minimum length ({generation_config.min_length}) is larger than"
+                f" the maximum length ({generation_config.max_length})"
+            )
+        if input_ids_seq_length >= generation_config.max_length:
+            input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+            logger.warning(
+                f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
+                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+                " increasing `max_new_tokens`."
+
+        logits_processor = self._get_logits_processor(
+            generation_config=generation_config,
+            input_ids_seq_length=input_ids_seq_length,
+            logits_processor=logits_processor,
+        )
+
+        if not generation_config.do_sample and generation_config.num_beams == 1:
+            return self._greedy_search(
+                input_ids,
+                generation_config.max_length,
+                generation_config.pad_token_id,
+                generation_config.eos_token_id,
+                logits_processor=logits_processor,
+                trace=trace,
+                params=params,
+                model_kwargs=model_kwargs,
+            )
+        elif generation_config.do_sample and generation_config.num_beams == 1:
+            logits_warper = self._get_logits_warper(generation_config=generation_config)
+            return self._sample(
+                input_ids,
+                generation_config.max_length,
+                generation_config.pad_token_id,
+                generation_config.eos_token_id,
+                prng_key,
+                logits_warper=logits_warper,
+                logits_processor=logits_processor,
+                trace=trace,
+                params=params,
+                model_kwargs=model_kwargs,
+            )
+        elif not generation_config.do_sample and generation_config.num_beams > 1:
+            # broadcast input_ids & encoder_outputs
+            input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)
+
+            if "encoder_outputs" in model_kwargs:
+                model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
+                    model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
+                )
+
+            for kwarg in ["attention_mask", "decoder_attention_mask"]:
+                if kwarg in model_kwargs:
+                    model_kwargs[kwarg] = self._expand_to_num_beams(
+                        model_kwargs[kwarg], num_beams=generation_config.num_beams
+                    )
+
+            return self._beam_search(
+                input_ids,
+                generation_config.max_length,
+                generation_config.pad_token_id,
+                generation_config.eos_token_id,
+                length_penalty=generation_config.length_penalty,
+                early_stopping=generation_config.early_stopping,
+                logits_processor=logits_processor,
+                trace=trace,
+                params=params,
+                num_return_sequences=generation_config.num_return_sequences,
+                model_kwargs=model_kwargs,
+            )
+        else:
+            raise NotImplementedError("Beam sampling is currently not implemented.")
+
+    def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
+        """
+        This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`]
+        instances used for multinomial sampling.
+        """
+        warpers = FlaxLogitsProcessorList()
+
+        if generation_config.temperature is not None and generation_config.temperature != 1.0:
+            warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
+        if generation_config.top_k is not None and generation_config.top_k != 0:
+            warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
+        if generation_config.top_p is not None and generation_config.top_p < 1.0:
+            warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
+
+        return warpers
+
+    def _get_logits_processor(
+        self,
+        generation_config: GenerationConfig,
+        input_ids_seq_length: int,
+        logits_processor: Optional[FlaxLogitsProcessorList],
+    ) -> FlaxLogitsProcessorList:
+        """
+        This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant
+        [`FlaxLogitsProcessor`] instances used to modify the scores of the language model head.
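+        Custom processors passed through `logits_processor` are merged with the defaults; passing a processor of a
+        type that is already present raises a `ValueError` (see `_merge_criteria_processor_list` below).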
+ """ + processors = FlaxLogitsProcessorList() + + if ( + generation_config.min_length is not None + and generation_config.eos_token_id is not None + and generation_config.min_length > -1 + ): + processors.append( + FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id) + ) + if generation_config.forced_bos_token_id is not None: + processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) + if generation_config.forced_eos_token_id is not None: + processors.append( + FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) + ) + if generation_config.suppress_tokens is not None: + processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) + if generation_config.begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = ( + begin_index + if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) + else begin_index + 1 + ) + if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0: + # generation starts after the last token that is forced + begin_index += generation_config.forced_decoder_ids[-1][0] + processors.append( + FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) + ) + if generation_config.forced_decoder_ids is not None: + forced_decoder_ids = [ + [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids + ] + processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids)) + processors = self._merge_criteria_processor_list(processors, logits_processor) + + return processors + + def _merge_criteria_processor_list( + self, + default_list: FlaxLogitsProcessorList, + custom_list: FlaxLogitsProcessorList, + ) -> FlaxLogitsProcessorList: + if len(custom_list) == 0: + return default_list + for default in default_list: + for custom in custom_list: + if type(custom) is type(default): + object_type = "logits processor" + raise ValueError( + f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" + f" `generate`, but it has already been created with the values {default}. {default} has been" + " created by passing the corresponding arguments to generate or by the model's config default" + f" values. If you just want to change the default values of {object_type} consider passing" + f" them as arguments to `generate` instead of using a custom {object_type}." + ) + default_list.extend(custom_list) + return default_list + + def _greedy_search( + self, + input_ids: None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + logits_processor: Optional[FlaxLogitsProcessorList] = None, + trace: bool = True, + params: Optional[Dict[str, jnp.ndarray]] = None, + model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, + ): + # init values + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + + batch_size, cur_len = input_ids.shape + + eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) + pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) + cur_len = jnp.array(cur_len) + + # per batch-item holding current token in loop. 
+        sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
+        sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
+
+        # per batch-item state bit indicating if sentence has finished.
+        is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
+
+        # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+        # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+        model = self.decode if self.config.is_encoder_decoder else self
+        # initialize model specific kwargs
+        model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
+
+        # initialize state
+        state = GreedyState(
+            cur_len=cur_len,
+            sequences=sequences,
+            running_token=input_ids,
+            is_sent_finished=is_sent_finished,
+            model_kwargs=model_kwargs,
+        )
+
+        def greedy_search_cond_fn(state):
+            """state termination condition fn."""
+            has_reached_max_length = state.cur_len == max_length
+            all_sequence_finished = jnp.all(state.is_sent_finished)
+            finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
+            return ~finish_generation
+
+        def greedy_search_body_fn(state):
+            """state update fn."""
+            model_outputs = model(state.running_token, params=params, **state.model_kwargs)
+            logits = model_outputs.logits[:, -1]
+
+            # apply min_length, ...
+            logits = logits_processor(state.sequences, logits, state.cur_len)
+
+            next_token = jnp.argmax(logits, axis=-1)
+
+            next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
+            next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
+            next_token = next_token[:, None]
+
+            next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
+            next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+            return GreedyState(
+                cur_len=state.cur_len + 1,
+                sequences=next_sequences,
+                running_token=next_token,
+                is_sent_finished=next_is_sent_finished,
+                model_kwargs=next_model_kwargs,
+            )
+
+        # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+        if input_ids.shape[1] > 1:
+            state = greedy_search_body_fn(state)
+
+        if not trace:
+            state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
+        else:
+            state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
+
+        return FlaxGreedySearchOutput(sequences=state.sequences)
+
+    def _sample(
+        self,
+        input_ids: None,
+        max_length: Optional[int] = None,
+        pad_token_id: Optional[int] = None,
+        eos_token_id: Optional[int] = None,
+        prng_key: Optional[jnp.ndarray] = None,
+        logits_processor: Optional[FlaxLogitsProcessorList] = None,
+        logits_warper: Optional[FlaxLogitsProcessorList] = None,
+        trace: bool = True,
+        params: Optional[Dict[str, jnp.ndarray]] = None,
+        model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
+    ):
+        # init values
+        max_length = max_length if max_length is not None else self.generation_config.max_length
+        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+        prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
+
+        batch_size, cur_len = input_ids.shape
+
+        eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
+        pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
+        cur_len = jnp.array(cur_len)
+
+        # per batch-item holding current token in loop.
+        sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
+        sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
+
+        # per batch-item state bit indicating if sentence has finished.
+        is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
+
+        # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+        # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+        model = self.decode if self.config.is_encoder_decoder else self
+
+        # initialize model specific kwargs
+        model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
+
+        # initialize state
+        state = SampleState(
+            cur_len=cur_len,
+            sequences=sequences,
+            running_token=input_ids,
+            is_sent_finished=is_sent_finished,
+            prng_key=prng_key,
+            model_kwargs=model_kwargs,
+        )
+
+        def sample_search_cond_fn(state):
+            """state termination condition fn."""
+            has_reached_max_length = state.cur_len == max_length
+            all_sequence_finished = jnp.all(state.is_sent_finished)
+            finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
+            return ~finish_generation
+
+        def sample_search_body_fn(state):
+            """state update fn."""
+            prng_key, prng_key_next = jax.random.split(state.prng_key)
+            model_outputs = model(state.running_token, params=params, **state.model_kwargs)
+
+            logits = model_outputs.logits[:, -1]
+
+            # apply min_length, ...
+            logits = logits_processor(state.sequences, logits, state.cur_len)
+            # apply top_p, top_k, temperature
+            logits = logits_warper(logits, logits, state.cur_len)
+
+            next_token = jax.random.categorical(prng_key, logits, axis=-1)
+
+            next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
+            next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished
+            next_token = next_token[:, None]
+
+            next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
+            next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+
+            return SampleState(
+                cur_len=state.cur_len + 1,
+                sequences=next_sequences,
+                running_token=next_token,
+                is_sent_finished=next_is_sent_finished,
+                model_kwargs=next_model_kwargs,
+                prng_key=prng_key_next,
+            )
+
+        # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+        if input_ids.shape[1] > 1:
+            state = sample_search_body_fn(state)
+
+        if not trace:
+            state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
+        else:
+            state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
+
+        return FlaxSampleOutput(sequences=state.sequences)
+
+    def _beam_search(
+        self,
+        input_ids: None,
+        max_length: Optional[int] = None,
+        pad_token_id: Optional[int] = None,
+        eos_token_id: Optional[int] = None,
+        length_penalty: Optional[float] = None,
+        early_stopping: Optional[Union[bool, str]] = None,
+        logits_processor: Optional[FlaxLogitsProcessorList] = None,
+        trace: bool = True,
+        params: Optional[Dict[str, jnp.ndarray]] = None,
+        num_return_sequences: Optional[int] = None,
+        model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
+    ):
+        """
+        This beam search function is heavily inspired by Flax's official example:
+        https://github.com/google/flax/blob/main/examples/wmt/decode.py
+        """
+
+        def flatten_beam_dim(tensor):
+            """Flattens the first two dimensions of a non-scalar array."""
+            # ignore scalars (e.g. cache index)
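+            # e.g. a (batch=2, beams=3, hidden=4) array reshapes to (6, 4), so the
+            # underlying model only ever sees a flat batch dimension.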
+            if tensor.ndim == 0:
+                return tensor
+            return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
+
+        def unflatten_beam_dim(tensor, batch_size, num_beams):
+            """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+            # ignore scalars (e.g. cache index)
+            if tensor.ndim == 0:
+                return tensor
+            return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
+
+        def gather_beams(nested, beam_indices, batch_size, new_num_beams):
+            """
+            Gathers the beam slices indexed by beam_indices into new beam array.
+            """
+            batch_indices = jnp.reshape(
+                jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
+            )
+
+            def gather_fn(tensor):
+                # ignore scalars (e.g. cache index)
+                if tensor.ndim == 0:
+                    return tensor
+                else:
+                    return tensor[batch_indices, beam_indices]
+
+            return jax.tree_util.tree_map(gather_fn, nested)
+
+        # init values
+        max_length = max_length if max_length is not None else self.generation_config.max_length
+        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+        length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
+        early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
+        num_return_sequences = (
+            num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
+        )
+
+        batch_size, num_beams, cur_len = input_ids.shape
+
+        eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
+        pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
+        cur_len = jnp.array(cur_len)
+
+        # record the prompt length of decoder
+        decoder_prompt_len = input_ids.shape[-1]
+
+        # per batch,beam-item holding current token in loop.
+        sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
+        running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
+        running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
+
+        # per batch,beam-item state bit indicating if sentence has finished.
+        is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
+
+        # per batch,beam-item score, logprobs
+        running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
+        scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
+
+        # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+        # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
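+        # (for decoder-only models, `self` is called directly instead of `self.decode`.)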
+ model = self.decode if self.config.is_encoder_decoder else self + + # flatten beam dim + if "encoder_outputs" in model_kwargs: + model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( + model_kwargs["encoder_outputs"]["last_hidden_state"] + ) + for kwarg in ["attention_mask", "decoder_attention_mask"]: + if kwarg in model_kwargs: + model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg]) + + # initialize model specific kwargs + model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs) + + # initialize state + state = BeamSearchState( + cur_len=cur_len, + running_sequences=running_sequences, + running_scores=running_scores, + sequences=sequences, + scores=scores, + is_sent_finished=is_sent_finished, + model_kwargs=model_kwargs, + ) + + def beam_search_cond_fn(state): + """beam search state termination condition fn.""" + + # 1. is less than max length? + not_max_length_yet = state.cur_len < max_length + + # 2. can the new beams still improve? + # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion + # below for more details. + # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 + # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of + # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. + if early_stopping == "never" and length_penalty > 0.0: + best_running_score = state.running_scores[:, :1] / ( + (max_length - decoder_prompt_len) ** length_penalty + ) + else: + best_running_score = state.running_scores[:, :1] / ( + (state.cur_len - decoder_prompt_len) ** length_penalty + ) + worst_finished_score = jnp.where( + state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7) + ) + improvement_still_possible = jnp.any(best_running_score > worst_finished_score) + + # 3. is there still a beam that has not finished? + still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True)) + + return not_max_length_yet & still_open_beam & improvement_still_possible + + def beam_search_body_fn(state, input_ids_length=1): + """beam search state update fn.""" + # 1. Forward current tokens + # Collect the current position slice along length to feed the fast + # autoregressive decoder model. Flatten the beam dimension into batch + # dimension for feeding into the model. + # unflatten beam dimension + # Unflatten beam dimension in attention cache arrays + input_token = flatten_beam_dim( + lax.dynamic_slice( + state.running_sequences, + (0, 0, state.cur_len - input_ids_length), + (batch_size, num_beams, input_ids_length), + ) + ) + model_outputs = model(input_token, params=params, **state.model_kwargs) + + logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams) + cache = jax.tree_util.tree_map( + lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values + ) + + # adapt logits for FlaxMarianMTModel + logits = self._adapt_logits_for_beam_search(logits) + + # 2. Compute log probs + # get log probabilities from logits, + # process logits with processors (*e.g.* min_length, ...), and + # add new logprobs to existing running logprobs scores. 
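+            # e.g. a running beam score of -1.2 plus a next-token log prob of -0.7
+            # gives a candidate score of -1.9: beam scores stay additive in log space.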
+ log_probs = jax.nn.log_softmax(logits) + log_probs = logits_processor( + flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len + ) + log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams) + log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2) + vocab_size = log_probs.shape[2] + log_probs = log_probs.reshape((batch_size, num_beams * vocab_size)) + + # 3. Retrieve top-K + # Each item in batch has num_beams * vocab_size candidate sequences. + # For each item, get the top 2*k candidates with the highest log- + # probabilities. We gather the top 2*K beams here so that even if the best + # K sequences reach EOS simultaneously, we have another K sequences + # remaining to continue the live beam search. + # Gather the top 2*K scores from _all_ beams. + # Gather 2*k top beams. + # Recover the beam index by floor division. + # Recover token id by modulo division and expand Id array for broadcasting. + # Update sequences for the 2*K top-k new sequences. + beams_to_keep = 2 * num_beams + topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep) + topk_beam_indices = topk_indices // vocab_size + topk_running_sequences = gather_beams( + state.running_sequences, topk_beam_indices, batch_size, beams_to_keep + ) + topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2) + topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len)) + + # 4. Check which sequences have ended + # Update current sequences: + # Did any of these sequences reach an end marker? + # To prevent these just finished sequences from being added to the current sequences + # set of active beam search sequences, set their log probs to a very large + # negative value. + did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id + running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7) + # 5. Get running sequences scores for next + # Determine the top k beam indices (from top 2*k beams) from log probs + # and gather top k beams (from top 2*k beams). + next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1] + next_running_sequences, next_running_scores = gather_beams( + [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams + ) + + # 6. Process topk logits + # Further process log probs: + # - add length penalty + # - make sure no scores can be added anymore if beam is full + # - make sure still running sequences cannot be chosen as finalized beam + topk_log_probs = topk_log_probs / ((state.cur_len + 1 - decoder_prompt_len) ** length_penalty) + beams_in_batch_are_full = jnp.broadcast_to( + state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape + ) & (early_stopping is True) + add_penalty = ~did_topk_just_finished | beams_in_batch_are_full + topk_log_probs += add_penalty * np.array(-1.0e7) + + # 7. Get scores, sequences, is sentence finished for next. 
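+            # (e.g. a candidate that just finished with score -1.5 displaces a stored
+            # finished beam at -2.3 once the merged scores are re-ranked below.)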
+ # Combine sequences, scores, and flags along the beam dimension and compare + # new finished sequence scores to existing finished scores and select the + # best from the new set of beams + merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1) + merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1) + merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1) + topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1] + next_sequences, next_scores, next_is_sent_finished = gather_beams( + [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams + ) + + # 8. Update model kwargs. + # Determine the top k beam indices from the original set of all beams. + # With these, gather the top k beam-associated caches. + next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams) + next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams) + model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache) + next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) + + return BeamSearchState( + cur_len=state.cur_len + 1, + running_scores=next_running_scores, + running_sequences=next_running_sequences, + scores=next_scores, + sequences=next_sequences, + is_sent_finished=next_is_sent_finished, + model_kwargs=next_model_kwargs, + ) + + # Always run first iteration outside of `lax.while_loop` to avoid calling `beam_search_cond_fn` + # when `state.cur_len` equals `decoder_prompt_len`. This also helps to comply with TPU when + # the very first prompt has sequence length > 1. + state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state) + + if not trace: + state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state) + else: + state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state) + + # Account for the edge-case where there are no finished sequences for a + # particular batch item. If so, return running sequences for that batch item. + none_finished = jnp.any(state.is_sent_finished, axis=1) + sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences) + scores = jnp.where(none_finished[:, None], state.scores, state.running_scores) + + # Take best beams for each batch (the score is sorted in descending order) + sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) + scores = flatten_beam_dim(scores[:, :num_return_sequences]) + + return FlaxBeamSearchOutput(sequences=sequences, scores=scores) diff --git a/modified/generation/logits_process.py b/modified/generation/logits_process.py new file mode 100644 index 0000000000000000000000000000000000000000..4b9b91cd8068d9f53c0ac0cd7acd01cd8d98cf71 --- /dev/null +++ b/modified/generation/logits_process.py @@ -0,0 +1,2143 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..utils import add_start_docstrings +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam + search or log softmax for each vocabulary token when using beam search + + Return: + `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. + +""" + + +class LogitsProcessor: + """Abstract base class for all logit processors that can be applied during generation.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class LogitsWarper: + """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class LogitsProcessorList(list): + """ + This class can be used to create a list of [`LogitsProcessor`] or [`LogitsWarper`] to subsequently process a + `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each + [`LogitsProcessor`] or [`LogitsWarper`] to the inputs. + """ + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + kwargs (`Dict[str, Any]`, *optional*): + Additional kwargs that are specific to a logits processor. + + Return: + `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: + The processed prediction scores. + + """ + for processor in self: + function_args = inspect.signature(processor.__call__).parameters + if len(function_args) > 2: + if not all(arg in kwargs for arg in list(function_args.keys())[2:]): + raise ValueError( + f"Make sure that all the required parameters: {list(function_args.keys())} for " + f"{processor.__class__} are passed to the logits processor." + ) + scores = processor(input_ids, scores, **kwargs) + else: + scores = processor(input_ids, scores) + return scores + + +class MinLengthLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] enforcing a min-length by setting EOS probability to 0. 
+    Note that, for decoder-only models like most LLMs, the length includes the prompt.
+
+    Args:
+        min_length (`int`):
+            The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
+        eos_token_id (`Union[int, List[int]]`):
+            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+    >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+
+    >>> inputs = tokenizer("A number:", return_tensors="pt")
+    >>> gen_out = model.generate(**inputs)
+    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+    A number: one
+
+    >>> # setting `min_length` to a value smaller than the uncontrolled output length has no impact
+    >>> gen_out = model.generate(**inputs, min_length=3)
+    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+    A number: one
+
+    >>> # setting a larger `min_length` will force the model to generate beyond its natural ending point, which is
+    >>> # not necessarily incorrect
+    >>> gen_out = model.generate(**inputs, min_length=10)
+    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+    A number: one thousand, nine hundred and ninety-four
+    ```
+    """
+
+    def __init__(self, min_length: int, eos_token_id: Union[int, List[int]]):
+        if not isinstance(min_length, int) or min_length < 0:
+            raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}")
+
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id):
+            logger.warning(f"`eos_token_id` has to be a list of non-negative integers, but is {eos_token_id}")
+
+        self.min_length = min_length
+        self.eos_token_id = eos_token_id
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        cur_len = input_ids.shape[-1]
+        if cur_len < self.min_length:
+            for i in self.eos_token_id:
+                scores[:, i] = -float("inf")
+        return scores
+
+
+class MinNewTokensLengthLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] enforcing a min-length of new tokens by setting EOS (End-Of-Sequence) token probability to 0.
+    Unlike [`MinLengthLogitsProcessor`], this processor ignores the prompt.
+
+    Args:
+        prompt_length_to_skip (`int`):
+            The input tokens length. Not a valid argument when used with `generate`, as it will automatically assign
+            the input length.
+        min_new_tokens (`int`):
+            The minimum *new* tokens length below which the score of `eos_token_id` is set to `-float("Inf")`.
+        eos_token_id (`Union[int, List[int]]`):
+            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ + Examples: + + ```python + >>> from transformers import AutoModelForCausalLM, AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer(["A number:"], return_tensors="pt") + >>> gen_out = model.generate(**inputs) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one + + >>> # setting `min_new_tokens` will force the model to generate beyond its natural ending point, which is not + >>> # necessarily incorrect + >>> gen_out = model.generate(**inputs, min_new_tokens=2) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one thousand + ``` + """ + + def __init__(self, prompt_length_to_skip: int, min_new_tokens: int, eos_token_id: Union[int, List[int]]): + for arg_name, arg_value in [ + ("prompt_length_to_skip", prompt_length_to_skip), + ("min_new_tokens", min_new_tokens), + ]: + if not isinstance(arg_value, int) or arg_value < 0: + raise ValueError(f"`{arg_name}` has to be a positive integer, but is {arg_value}") + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id): + logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}") + + self.prompt_length_to_skip = prompt_length_to_skip + self.min_new_tokens = min_new_tokens + self.eos_token_id = eos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + new_tokens_length = input_ids.shape[-1] - self.prompt_length_to_skip + if new_tokens_length < self.min_new_tokens: + for i in self.eos_token_id: + scores[:, i] = -float("inf") + + return scores + + +class TemperatureLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] for temperature (exponential scaling output probability distribution), which effectively means + that it can control the randomness of the predicted tokens. Often used together with [`TopPLogitsWarper`] and + [`TopKLogitsWarper`]. + + + + Make sure that `do_sample=True` is included in the `generate` arguments otherwise the temperature value won't have + any effect. + + + + Args: + temperature (`float`): + Strictly positive float value used to modulate the logits distribution. A value smaller than `1` decreases + randomness (and vice versa), with `0` being equivalent to shifting all probability mass to the most likely + token. + + Examples: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(0) # for reproducibility + + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + >>> model.config.pad_token_id = model.config.eos_token_id + >>> inputs = tokenizer(["Hugging Face Company is"], return_tensors="pt") + + >>> # With temperature=1.0, the default, we consistently get random outputs due to random sampling. 
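+    >>> # (temperature divides the logits, so 1.0 leaves them unchanged)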
+ >>> generate_kwargs = {"max_new_tokens": 10, "do_sample": True, "temperature": 1.0, "num_return_sequences": 2} + >>> outputs = model.generate(**inputs, **generate_kwargs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) + ['Hugging Face Company is a joint venture between GEO Group, one of', + 'Hugging Face Company is not an exact science – but what we believe does'] + + >>> # However, with temperature close to 0, it approximates greedy decoding strategies (invariant) + >>> generate_kwargs["temperature"] = 0.0001 + >>> outputs = model.generate(**inputs, **generate_kwargs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) + ['Hugging Face Company is a company that has been around for over 20 years', + 'Hugging Face Company is a company that has been around for over 20 years'] + ``` + """ + + def __init__(self, temperature: float): + if not isinstance(temperature, float) or not (temperature > 0): + except_msg = ( + f"`temperature` (={temperature}) has to be a strictly positive float, otherwise your next token " + "scores will be invalid." + ) + if isinstance(temperature, float) and temperature == 0.0: + except_msg += " If you're looking for greedy decoding strategies, set `do_sample=False`." + raise ValueError(except_msg) + + self.temperature = temperature + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + scores = scores / self.temperature + return scores + + +class RepetitionPenaltyLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that prevents the repetition of previous tokens through a penalty. This penalty is applied at + most once per token. Note that, for decoder-only models like most LLMs, the considered tokens include the prompt. + + In the original [paper](https://arxiv.org/pdf/1909.05858.pdf), the authors suggest the use of a penalty of around + 1.2 to achieve a good balance between truthful generation and lack of repetition. To penalize and reduce + repetition, use `penalty` values above 1.0, where a higher value penalizes more strongly. To reward and encourage + repetition, use `penalty` values between 0.0 and 1.0, where a lower value rewards more strongly. + + Args: + penalty (`float`): + The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 penalizes previously generated + tokens. Between 0.0 and 1.0 rewards previously generated tokens. + + Examples: + + ```py + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> # Initializing the model and tokenizer for it + >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") + >>> inputs = tokenizer(["I'm not going to"], return_tensors="pt") + + >>> # This shows a normal generate without any specific parameters + >>> summary_ids = model.generate(**inputs) + >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]) + I'm not going to be able to do that. I'm going to be able to do that + + >>> # This generates a penalty for repeated tokens + >>> penalized_ids = model.generate(**inputs, repetition_penalty=1.1) + >>> print(tokenizer.batch_decode(penalized_ids, skip_special_tokens=True)[0]) + I'm not going to be able to do that. 
+    I'll just have to go out and play
+    ```
+    """
+
+    def __init__(self, penalty: float):
+        if not isinstance(penalty, float) or not (penalty > 0):
+            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+
+        self.penalty = penalty
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        score = torch.gather(scores, 1, input_ids)
+
+        # if score < 0 then repetition penalty has to be multiplied to reduce the token probabilities
+        score = torch.where(score < 0, score * self.penalty, score / self.penalty)
+
+        scores.scatter_(1, input_ids, score)
+        return scores
+
+
+class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that works similarly to [`RepetitionPenaltyLogitsProcessor`], but with an *inverse* penalty
+    that is applied to the tokens present in the prompt. In other words, a penalty above 1.0 increases the odds of
+    selecting tokens that were present in the prompt.
+
+    It was designed to avoid hallucination in input-grounded tasks, like summarization. Although originally intended
+    for encoder-decoder models, it can also be used with decoder-only models like LLMs.
+
+    Args:
+        penalty (`float`):
+            The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 rewards prompt tokens. Between 0.0
+            and 1.0 penalizes prompt tokens.
+        encoder_input_ids (`torch.LongTensor`):
+            The encoder_input_ids that should be repeated within the decoder ids.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+    >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+
+    >>> inputs = tokenizer(["Alice and Bob. The third member's name was"], return_tensors="pt")
+    >>> gen_out = model.generate(**inputs)
+    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+    Alice and Bob. The third member's name was not mentioned.
+
+    >>> # With the `encoder_repetition_penalty` argument we can trigger this logits processor in `generate`, which can
+    >>> # promote the use of prompt tokens ("Bob" in this example)
+    >>> gen_out = model.generate(**inputs, encoder_repetition_penalty=1.2)
+    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+    Alice and Bob. The third member's name was Bob. The third member's name was Bob.
+    ```
+    """
+
+    def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor):
+        if not isinstance(penalty, float) or not (penalty > 0):
+            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+
+        self.penalty = 1 / penalty
+        self.encoder_input_ids = encoder_input_ids
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        score = torch.gather(scores, 1, self.encoder_input_ids)
+
+        # if score < 0 then hallucination penalty has to be multiplied to increase the token probabilities
+        score = torch.where(score < 0, score * self.penalty, score / self.penalty)
+
+        scores.scatter_(1, self.encoder_input_ids, score)
+        return scores
+
+
+class TopPLogitsWarper(LogitsWarper):
+    """
+    [`LogitsWarper`] that performs top-p, i.e. restricting to the smallest set of most probable tokens whose
+    cumulative probability reaches at least `top_p`. Often used together with [`TemperatureLogitsWarper`] and
+    [`TopKLogitsWarper`].
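+    For example, with `top_p=0.9` and next-token probabilities `{0.5, 0.3, 0.15, 0.05}`, only the first three tokens
+    are kept, since their cumulative probability (0.95) is the smallest one to reach `top_p`.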
+ + Args: + top_p (`float`): + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(0) + >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") + + >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 0, 2, 2. 2, 2, 2, 2 + + >>> # With `top_p` sampling, the output gets restricted to high-probability tokens. + >>> # Pro tip: In practice, LLMs use `top_p` in the 0.9-0.95 range. + >>> outputs = model.generate(**inputs, do_sample=True, top_p=0.1) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9 + ``` + """ + + def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + top_p = float(top_p) + if top_p < 0 or top_p > 1.0: + raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") + if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): + raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") + + self.top_p = top_p + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + sorted_logits, sorted_indices = torch.sort(scores, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p) + # Keep at least min_tokens_to_keep + sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0 + + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) + scores = scores.masked_fill(indices_to_remove, self.filter_value) + return scores + + +class TopKLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Often used together + with [`TemperatureLogitsWarper`] and [`TopPLogitsWarper`]. + + Args: + top_k (`int`): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. 
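+
+    For instance, with `top_k=2`, every token other than the two highest-scoring ones has its logit set to
+    `filter_value` before sampling.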
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+    >>> set_seed(0)
+    >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+
+    >>> inputs = tokenizer("A sequence: A, B, C, D", return_tensors="pt")
+
+    >>> # With sampling, the output is unexpected -- sometimes too unexpected.
+    >>> outputs = model.generate(**inputs, do_sample=True)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    A sequence: A, B, C, D, G, H, I. A, M
+
+    >>> # With `top_k` sampling, the output gets restricted to the k most likely tokens.
+    >>> # Pro tip: In practice, LLMs use `top_k` in the 5-50 range.
+    >>> outputs = model.generate(**inputs, do_sample=True, top_k=2)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    A sequence: A, B, C, D, E, F, G, H, I
+    ```
+    """
+
+    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+        if not isinstance(top_k, int) or top_k <= 0:
+            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
+
+        self.top_k = max(top_k, min_tokens_to_keep)
+        self.filter_value = filter_value
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        top_k = min(self.top_k, scores.size(-1))  # Safety check
+        # Remove all tokens with a probability less than the last token of the top-k
+        indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
+        scores = scores.masked_fill(indices_to_remove, self.filter_value)
+        return scores
+
+
+class TypicalLogitsWarper(LogitsWarper):
+    r"""
+    [`LogitsWarper`] that performs typical decoding. Inspired by how humans use language, it prioritizes tokens whose
+    log probability is close to the entropy of the token probability distribution. This means that the most likely
+    tokens may be discarded in the process.
+
+    See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
+
+    Args:
+        mass (`float`, *optional*, defaults to 0.9):
+            Value of typical_p between 0 and 1 inclusive.
+        filter_value (`float`, *optional*, defaults to -inf):
+            All filtered values will be set to this float value.
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens that cannot be filtered.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+    >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+
+    >>> inputs = tokenizer("1, 2, 3", return_tensors="pt")
+
+    >>> # We can see that greedy decoding produces a sequence of numbers
+    >>> outputs = model.generate(**inputs)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+
+    >>> # For this particular seed, we can see that sampling produces nearly the same low-information (= low entropy)
+    >>> # sequence
+    >>> set_seed(18)
+    >>> outputs = model.generate(**inputs, do_sample=True)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    1, 2, 3, 4, 5, 6, 7, 8, 9 and 10
+
+    >>> # With `typical_p` set, the most obvious sequence is no longer produced, which may be good for your problem
+    >>> set_seed(18)
+    >>> outputs = model.generate(
+    ...
**inputs, do_sample=True, typical_p=0.1, return_dict_in_generate=True, output_scores=True + ... ) + >>> print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]) + 1, 2, 3 and 5 + + >>> # We can see that the token corresponding to "4" (token 934) in the second position, the most likely token + >>> # as seen with greedy decoding, was entirely blocked out + >>> print(outputs.scores[1][0, 934]) + tensor(-inf) + ``` + """ + + def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + mass = float(mass) + if not (mass > 0 and mass < 1): + raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}") + if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): + raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") + + self.filter_value = filter_value + self.mass = mass + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # calculate entropy + normalized = torch.nn.functional.log_softmax(scores, dim=-1) + p = torch.exp(normalized) + ent = -(normalized * p).nansum(-1, keepdim=True) + + # shift and sort + shifted_scores = torch.abs((-normalized) - ent) + sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False) + sorted_logits = scores.gather(-1, sorted_indices) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + + # Remove tokens with cumulative mass above the threshold + last_ind = (cumulative_probs < self.mass).sum(dim=1) + last_ind.clamp_(max=sorted_scores.shape[-1] - 1) + sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1)) + sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 + indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) + + scores = scores.masked_fill(indices_to_remove, self.filter_value) + return scores + + +class EpsilonLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs epsilon-sampling, i.e. restricting to tokens with `prob >= epsilon`. Takes the + largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model + Desmoothing](https://arxiv.org/abs/2210.15191) for more information. + + Args: + epsilon (`float`): + If set to > 0, only the most tokens with probabilities `epsilon` or higher are kept for generation. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + + Examples: + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(0) + >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") + + >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 0, 2, 2. 2, 2, 2, 2 + + >>> # With epsilon sampling, the output gets restricted to high-probability tokens. Note that this is similar to + >>> # Top P sampling, which restricts tokens based on their cumulative probability. 
+ >>> # Pro tip: The paper recomends using `epsilon_cutoff` values between 3e-4 and 9e-4 + >>> outputs = model.generate(**inputs, do_sample=True, epsilon_cutoff=0.1) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9 + ``` + """ + + def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + epsilon = float(epsilon) + if epsilon <= 0 or epsilon >= 1: + raise ValueError(f"`epsilon_cutoff` has to be a float > 0 and < 1, but is {epsilon}") + + min_tokens_to_keep = int(min_tokens_to_keep) + if min_tokens_to_keep < 1: + raise ValueError( + f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}" + ) + + self.epsilon = epsilon + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # Determine which indices to remove + probabilities = scores.softmax(dim=-1) + indices_to_remove = probabilities < self.epsilon + + # Keep the words with the 'min_tokens_to_keep'-highest probabilities + top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check + indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None]) + + scores = scores.masked_fill(indices_to_remove, self.filter_value) + return scores + + +class EtaLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs eta-sampling, a technique to filter out tokens with probabilities below a dynamic + cutoff value, `eta`, which is calculated based on a combination of the hyperparameter `epsilon` and the entropy of + the token probabilities, i.e. `eta := min(epsilon, sqrt(epsilon * e^-entropy(probabilities)))`. Takes the largest + min_tokens_to_keep tokens if no tokens satisfy this constraint. It addresses the issue of poor quality in long + samples of text generated by neural language models leading to more coherent and fluent text. See [Truncation + Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more information. Note: `do_sample` + must be set to `True` for this `LogitsWarper` to work. + + + Args: + epsilon (`float`): + A float value in the range (0, 1). Hyperparameter used to calculate the dynamic cutoff value, `eta`. The + suggested values from the paper ranges from 3e-4 to 4e-3 depending on the size of the model. + filter_value (`float`, *optional*, defaults to -inf): + All values that are found to be below the dynamic cutoff value, `eta`, are set to this float value. This + parameter is useful when logits need to be modified for very low probability tokens that should be excluded + from generation entirely. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Specifies the minimum number of tokens that must be kept for generation, regardless of their probabilities. + For example, if `min_tokens_to_keep` is set to 1, at least one token will always be kept for generation, + even if all tokens have probabilities below the cutoff `eta`. + + Examples: + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(0) + >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") + + >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. 
+ >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 0, 2, 2. 2, 2, 2, 2 + + >>> # With eta sampling, the output gets restricted to high-probability tokens. You can see it as a dynamic form of + >>> # epsilon sampling that adapts its cutoff probability based on the entropy (high entropy = lower cutoff). + >>> # Pro tip: The paper recomends using `eta_cutoff` values between 3e-4 to 4e-3 + >>> outputs = model.generate(**inputs, do_sample=True, eta_cutoff=0.1) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9 + ``` + """ + + def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + epsilon = float(epsilon) + if epsilon <= 0 or epsilon >= 1: + raise ValueError(f"`eta_cutoff` has to be a float > 0 and < 1, but is {epsilon}") + + min_tokens_to_keep = int(min_tokens_to_keep) + if min_tokens_to_keep < 1: + raise ValueError( + f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}" + ) + + self.epsilon = torch.tensor(epsilon) + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # Calculate the adaptive cutoff + probabilities = scores.softmax(dim=-1) + entropy = torch.distributions.Categorical(logits=scores).entropy() + eta = torch.min(self.epsilon, torch.sqrt(self.epsilon) * torch.exp(-entropy))[..., None] + indices_to_remove = probabilities < eta + + # Keep the words with the 'min_tokens_to_keep'-highest probabilities + top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check + indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None]) + + scores = scores.masked_fill(indices_to_remove, self.filter_value) + return scores + + +def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int): + """ + Assume ngram_size=2 and prev_input_ids=tensor([[40, 2883, 2712, 4346]]). The output of generated ngrams look like + this {(40,): [2883], (2883,): [2712], (2712,): [4346]}. + + Args: + ngram_size (`int`): + The number sequential tokens taken as a group which may only occur once before being banned. + prev_input_ids (`torch.Tensor`): + Generated token ids for the current hypothesis. + num_hypos (`int`): + The number of hypotheses for which n-grams need to be generated. + + Returns: + generated_ngrams (`dict`): + Dictionary of generated ngrams. + """ + # Initialize an empty list of dictionaries, one for each hypothesis (index) in the range of num_hypos + generated_ngrams = [{} for _ in range(num_hypos)] + for idx in range(num_hypos): + gen_tokens = prev_input_ids[idx].tolist() + generated_ngram = generated_ngrams[idx] + # Loop through each n-gram of size ngram_size in the list of tokens (gen_tokens) + for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]): + prev_ngram_tuple = tuple(ngram[:-1]) + generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] + return generated_ngrams + + +def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len): + """ + Determines the banned tokens for the current hypothesis based on previously generated n-grams. + + Args: + banned_ngrams (`dict`): + A dictionary containing previously generated n-grams for each hypothesis. 
+        prev_input_ids (`torch.Tensor`):
+            Generated token ids for the current hypothesis.
+        ngram_size (`int`):
+            The number of sequential tokens taken as a group which may only occur once before being banned.
+        cur_len (`int`):
+            The current length of the token sequences for which the n-grams are being checked.
+
+    Returns:
+        List of tokens that are banned.
+    """
+    # Before decoding the next token, prevent decoding of ngrams that have already appeared
+    start_idx = cur_len + 1 - ngram_size
+    ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
+    return banned_ngrams.get(ngram_idx, [])
+
+
+def _calc_banned_ngram_tokens(
+    ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int
+) -> List[Iterable[int]]:
+    """Copied from fairseq for no_repeat_ngram in beam_search"""
+    if cur_len + 1 < ngram_size:
+        # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
+        return [[] for _ in range(num_hypos)]
+    generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos)
+    banned_tokens = [
+        _get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len)
+        for hypo_idx in range(num_hypos)
+    ]
+    return banned_tokens
+
+
+class NoRepeatNGramLogitsProcessor(LogitsProcessor):
+    r"""
+    N-grams are groups of "n" consecutive words, characters, or tokens taken from a sequence of text. Given the
+    sentence: "She runs fast", the bi-grams (n=2) would be ("she", "runs") and ("runs", "fast"). In text generation,
+    avoiding repetitions of word sequences provides a more diverse output. This [`LogitsProcessor`] enforces no
+    repetition of n-grams by setting the scores of banned tokens to negative infinity, which eliminates those tokens
+    from consideration when further processing the scores. Note that, for decoder-only models like most LLMs, the
+    prompt is also considered to obtain the n-grams. See the original implementation in
+    [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
+
+
+
+    Use n-gram penalties with care. For instance, penalizing 2-grams (bigrams) in an article about the city of New York
+    might lead to undesirable outcomes where the city's name appears only once in the entire text.
+    [Reference](https://huggingface.co/blog/how-to-generate)
+
+
+
+    Args:
+        ngram_size (`int`):
+            All ngrams of size `ngram_size` can only occur once.
+
+    Examples:
+
+    ```py
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+    >>> inputs = tokenizer(["Today I"], return_tensors="pt")
+
+    >>> output = model.generate(**inputs)
+    >>> print(tokenizer.decode(output[0], skip_special_tokens=True))
+    Today I’m not sure if I’m going to be able to do it.
+
+    >>> # Now let's add ngram size using `no_repeat_ngram_size`. This stops the repetitions ("I’m") in the output.
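+    >>> # (For decoder-only models like this one, n-grams from the prompt count too, so the prompt's own
+    >>> # bigram "Today I" can itself never be repeated in the continuation.)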
+ >>> output = model.generate(**inputs, no_repeat_ngram_size=2) + >>> print(tokenizer.decode(output[0], skip_special_tokens=True)) + Today I’m not sure if I can get a better understanding of the nature of this issue + ``` + """ + + def __init__(self, ngram_size: int): + if not isinstance(ngram_size, int) or ngram_size <= 0: + raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") + self.ngram_size = ngram_size + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + num_batch_hypotheses = scores.shape[0] + cur_len = input_ids.shape[-1] + banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len) + for i, banned_tokens in enumerate(banned_batch_tokens): + scores[i, banned_tokens] = -float("inf") + + return scores + + +class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that works similarly to [`NoRepeatNGramLogitsProcessor`], but applied exclusively to prevent + the repetition of n-grams present in the prompt. + + It was designed to promote chattiness in a language model, by preventing the generation of n-grams present in + previous conversation rounds. + + Args: + encoder_ngram_size (`int`): + All ngrams of size `ngram_size` can only occur within the encoder input ids. + encoder_input_ids (`int`): + The encoder_input_ids that should not be repeated within the decoder ids. + + Examples: + + ```py + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer("Alice: I love cats. What do you love?\nBob:", return_tensors="pt") + + >>> # With greedy decoding, we see Bob repeating Alice's opinion. If Bob was a chatbot, it would be a poor one. + >>> outputs = model.generate(**inputs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + Alice: I love cats. What do you love? + Bob: I love cats. What do you + + >>> # With this logits processor, we can prevent Bob from repeating Alice's opinion. + >>> outputs = model.generate(**inputs, encoder_no_repeat_ngram_size=2) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + Alice: I love cats. What do you love? + Bob: My cats are very cute. 
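+
+    >>> # A larger `encoder_no_repeat_ngram_size` is less restrictive: only longer exact spans from the
+    >>> # prompt are banned (illustrative; output not shown).
+    >>> outputs = model.generate(**inputs, encoder_no_repeat_ngram_size=4)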
+ ``` + """ + + def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor): + if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0: + raise ValueError( + f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}" + ) + self.ngram_size = encoder_ngram_size + if len(encoder_input_ids.shape) == 1: + encoder_input_ids = encoder_input_ids.unsqueeze(0) + self.batch_size = encoder_input_ids.shape[0] + self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # B x num_beams + num_hypos = scores.shape[0] + num_beams = num_hypos // self.batch_size + cur_len = input_ids.shape[-1] + banned_batch_tokens = [ + _get_generated_ngrams( + self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len + ) + for hypo_idx in range(num_hypos) + ] + + for i, banned_tokens in enumerate(banned_batch_tokens): + scores[i, banned_tokens] = -float("inf") + + return scores + + +class SequenceBiasLogitsProcessor(LogitsProcessor): + """ + [`LogitsProcessor`] that applies an additive bias on sequences. The bias is applied to the last token of a sequence + when the next generated token can complete it. Consequently, to take the most of biasing sequences with more than + one token, consider using beam methods (to gracefully work around partially completed sequences that have a + negative bias) and applying the bias to their prefixes (to ensure the bias is applied earlier). + + + + In order to get the token ids of the sequences that you want to bias, make sure to set `add_prefix_space=True` when + initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The + `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours + come from `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). + + + + Args: + sequence_bias (`Dict[Tuple[int], float]`): + Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the + sequence being selected, while negative biases do the opposite. If a sequence has a length of 1, its bias + will always be applied. Otherwise, the bias will only be applied if the sequence in question is about to be + completed (in the token selection step after this processor is applied). + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> inputs = tokenizer(["The full name of Donald is Donald"], return_tensors="pt") + + >>> summary_ids = model.generate(inputs["input_ids"], max_new_tokens=4) + >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald J. Trump Jr + + >>> # Now let's control generation through a bias. Please note that the tokenizer is initialized differently! + >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("gpt2", add_prefix_space=True) + + + >>> def get_tokens_as_tuple(word): + ... 
return tuple(tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0]) + + + >>> # If we add a negative bias without beam search, it may become "stuck" in a prefix without good continuations + >>> sequence_bias = {get_tokens_as_tuple("Trump"): -10.0} + >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, sequence_bias=sequence_bias) + >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald J. Donald, + + >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias) + >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald Rumsfeld, + + >>> # We can also add a positive bias to nudge the model towards specific tokens or continuations + >>> sequence_bias = {get_tokens_as_tuple("Donald Duck"): 10.0} + >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias) + >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald Duck. + ``` + """ + + def __init__(self, sequence_bias: Dict[Tuple[int], float]): + self.sequence_bias = sequence_bias + self._validate_arguments() + + # Bias variables that will be populated on the first call (for retrocompatibility purposes, the vocabulary size + # is infered in the first usage, which inhibits initializing here) + self.length_1_bias = None + self.prepared_bias_variables = False + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # 1 - Prepares the bias tensors. This is only needed the first time the logit processor is called. + if not self.prepared_bias_variables: + self._prepare_bias_variables(scores) + + # 2 - prepares an empty bias to add + bias = torch.zeros_like(scores) + + # 3 - include the bias from length = 1 + bias += self.length_1_bias + + # 4 - include the bias from length > 1, after determining which biased sequences may be completed. + for sequence_ids, sequence_bias in self.sequence_bias.items(): + if len(sequence_ids) == 1: # the sequence is of length 1, already applied + continue + if len(sequence_ids) > input_ids.shape[1]: # the sequence is longer than the context, ignore + continue + prefix_length = len(sequence_ids) - 1 + last_token = sequence_ids[-1] + matching_rows = torch.eq( + input_ids[:, -prefix_length:], + torch.tensor(sequence_ids[:-1], dtype=input_ids.dtype, device=input_ids.device), + ).prod(dim=1) + bias[:, last_token] += torch.where( + matching_rows.bool(), + torch.tensor(sequence_bias, device=input_ids.device), + torch.tensor(0.0, device=input_ids.device), + ) + + # 5 - apply the bias to the scores + scores = scores + bias + return scores + + def _prepare_bias_variables(self, scores: torch.FloatTensor): + vocabulary_size = scores.shape[-1] + + # Check biased tokens out of bounds + invalid_biases = [] + for sequence_ids in self.sequence_bias: + for token_id in sequence_ids: + if token_id >= vocabulary_size: + invalid_biases.append(token_id) + if len(invalid_biases) > 0: + raise ValueError( + f"The model vocabulary size is {vocabulary_size}, but the following tokens were being biased: " + f"{invalid_biases}" + ) + + # Precompute the bias tensors to be applied. Sequences of length 1 are kept separately, as they can be applied + # with simpler logic. 
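+        # (Biases for sequences longer than 1 stay in `self.sequence_bias` and are matched against the
+        # tail of `input_ids` on every call; see step 4 in `__call__` above.)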
+ self.length_1_bias = torch.zeros((vocabulary_size,), dtype=torch.float).to(scores.device) + for sequence_ids, bias in self.sequence_bias.items(): + if len(sequence_ids) == 1: + self.length_1_bias[sequence_ids[-1]] = bias + + self.prepared_bias_variables = True + + def _validate_arguments(self): + sequence_bias = self.sequence_bias + if not isinstance(sequence_bias, dict) or len(sequence_bias) == 0: + raise ValueError(f"`sequence_bias` has to be a non-empty dictionary, but is {sequence_bias}.") + if any(not isinstance(sequence_ids, tuple) for sequence_ids in sequence_bias.keys()): + raise ValueError(f"`sequence_bias` has to be a dict with tuples as keys, but is {sequence_bias}.") + if any( + any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in sequence_ids) + or len(sequence_ids) == 0 + for sequence_ids in sequence_bias.keys() + ): + raise ValueError( + f"Each key in `sequence_bias` has to be a non-empty tuple of positive integers, but is " + f"{sequence_bias}." + ) + if any(not isinstance(bias, float) for bias in sequence_bias.values()): + raise ValueError(f"`sequence_bias` has to be a dict with floats as values, but is {sequence_bias}.") + + +class NoBadWordsLogitsProcessor(SequenceBiasLogitsProcessor): + """ + [`LogitsProcessor`] that enforces that specified sequences will never be selected. + + + + In order to get the token ids of the words that should not appear in the generated text, make sure to set + `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words, + add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers, + as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more + [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). + + + + Args: + bad_words_ids (`List[List[int]]`): + List of list of token ids that are not allowed to be generated. + eos_token_id (`Union[int, List[int]]`): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> inputs = tokenizer(["In a word, the cake is a"], return_tensors="pt") + + >>> output_ids = model.generate(inputs["input_ids"], max_new_tokens=5, pad_token_id=tokenizer.eos_token_id) + >>> print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]) + In a word, the cake is a bit of a mess. + + >>> # Now let's take the bad words out. Please note that the tokenizer is initialized differently + >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("gpt2", add_prefix_space=True) + + + >>> def get_tokens_as_list(word_list): + ... "Converts a sequence of words into a list of tokens" + ... tokens_list = [] + ... for word in word_list: + ... tokenized_word = tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0] + ... tokens_list.append(tokenized_word) + ... return tokens_list + + + >>> bad_words_ids = get_tokens_as_list(word_list=["mess"]) + >>> output_ids = model.generate( + ... inputs["input_ids"], max_new_tokens=5, bad_words_ids=bad_words_ids, pad_token_id=tokenizer.eos_token_id + ... ) + >>> print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]) + In a word, the cake is a bit of a surprise. 
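+
+    >>> # Multi-token entries are banned as whole sequences; for instance, the two-token phrase "a mess"
+    >>> # can be banned in the same way (illustrative; output not shown)
+    >>> bad_words_ids = get_tokens_as_list(word_list=["a mess"])
+    >>> output_ids = model.generate(
+    ...     inputs["input_ids"], max_new_tokens=5, bad_words_ids=bad_words_ids, pad_token_id=tokenizer.eos_token_id
+    ... )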
+    ```
+    """
+
+    def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]):
+        self.bad_word_ids = bad_words_ids
+        self._validate_arguments()
+
+        # Filter EOS token from bad_words_ids
+        if eos_token_id is None:
+            eos_token_id = []
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        bad_words_ids = list(
+            filter(lambda bad_token_seq: all(bad_token_seq != [i] for i in eos_token_id), bad_words_ids)
+        )
+
+        # Forbidding a sequence is equivalent to setting its bias to -inf
+        sequence_bias = {tuple(sequence): float("-inf") for sequence in bad_words_ids}
+        super().__init__(sequence_bias=sequence_bias)
+
+    def _validate_arguments(self):
+        bad_words_ids = self.bad_word_ids
+        if not isinstance(bad_words_ids, list) or len(bad_words_ids) == 0:
+            raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
+        if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
+            raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
+        if any(
+            any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
+            for bad_word_ids in bad_words_ids
+        ):
+            raise ValueError(
+                f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
+            )
+
+
+class PrefixConstrainedLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that enforces constrained generation and is useful for prefix-conditioned constrained
+    generation. See [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904) for more information.
+
+    Args:
+        prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`):
+            This function constrains the beam search to allowed tokens only at each step. It takes 2
+            arguments: `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for
+            the next generation step, conditioned on the previously generated tokens `inputs_ids` and the batch ID
+            `batch_id`.
+
+    Examples:
+
+    ```py
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+
+    >>> inputs = tokenizer("Alice and Bob", return_tensors="pt")
+
+    >>> # By default, it continues generating according to the model's logits
+    >>> outputs = model.generate(**inputs, max_new_tokens=5)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    Alice and Bob are friends
+
+    >>> # We can constrain it with `prefix_allowed_tokens_fn` to force a certain behavior based on a prefix.
+    >>> # For instance, we can force an entire entity to be generated when its beginning is detected.
+    >>> entity = tokenizer(" Bob Marley", return_tensors="pt").input_ids[0]  # 3 tokens
+    >>> def prefix_allowed_tokens_fn(batch_id, input_ids):
+    ...     '''
+    ...     Attempts to generate 'Bob Marley' when 'Bob' is detected.
+    ...     In this case, `batch_id` is not used, but you can set rules for each batch member.
+    ...     '''
+    ...     if input_ids[-1] == entity[0]:
+    ...         return entity[1]
+    ...     elif input_ids[-2] == entity[0] and input_ids[-1] == entity[1]:
+    ...         return entity[2]
+    ...
return list(range(tokenizer.vocab_size))  # If no match, allow all tokens
+
+    >>> outputs = model.generate(**inputs, max_new_tokens=5, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    Alice and Bob Marley
+    ```
+    """
+
+    def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int):
+        self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
+        self._num_beams = num_beams
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        mask = torch.full_like(scores, -math.inf)
+        for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])):
+            for beam_id, sent in enumerate(beam_sent):
+                prefix_allowed_tokens = self._prefix_allowed_tokens_fn(batch_id, sent)
+                if len(prefix_allowed_tokens) == 0:
+                    raise ValueError(
+                        f"`prefix_allowed_tokens_fn` returned an empty list for batch ID {batch_id}. "
+                        "This means that the constraint is unsatisfiable. Please check your implementation "
+                        "of `prefix_allowed_tokens_fn`."
+                    )
+                mask[batch_id * self._num_beams + beam_id, prefix_allowed_tokens] = 0
+
+        return scores + mask
+
+
+class HammingDiversityLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that enforces diverse beam search.
+
+    Note that this logits processor is only effective for [`PreTrainedModel.group_beam_search`]. See [Diverse Beam
+    Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf) for more
+    details.
+
+    Traditional beam search often generates very similar sequences across different beams.
+    `HammingDiversityLogitsProcessor` addresses this by penalizing beams that generate tokens already chosen by other
+    beams in the same time step.
+
+    Args:
+        diversity_penalty (`float`):
+            This value is subtracted from a beam's score if it generates a token that was already chosen by any beam
+            from another group at the same time step. A higher `diversity_penalty` will enforce greater diversity
+            among the beams. Adjusting this value can help strike a balance between diversity and natural likelihood.
+        num_beams (`int`):
+            Number of beams for beam search. 1 means no beam search.
+        num_beam_groups (`int`):
+            Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+            See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+    >>> import torch
+
+    >>> # Initialize the model and tokenizer
+    >>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
+    >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+
+    >>> # A long text about the solar system
+    >>> text = (
+    ...     "The Solar System is a gravitationally bound system comprising the Sun and the objects that orbit it, "
+    ...     "either directly or indirectly. Of the objects that orbit the Sun directly, the largest are the eight "
+    ...     "planets, with the remainder being smaller objects, such as the five dwarf planets and small Solar System "
+    ...     "bodies. The Solar System formed 4.6 billion years ago from the gravitational collapse of a giant "
+    ...     "interstellar molecular cloud."
+    ... )
+    >>> inputs = tokenizer("summarize: " + text, return_tensors="pt")
+
+    >>> # Generate diverse summary
+    >>> outputs_diverse = model.generate(
+    ...     **inputs,
+    ...     num_beam_groups=2,
+    ...     diversity_penalty=10.0,
+    ...
max_length=100, + ... num_beams=4, + ... num_return_sequences=2, + ... ) + >>> summaries_diverse = tokenizer.batch_decode(outputs_diverse, skip_special_tokens=True) + + >>> # Generate non-diverse summary + >>> outputs_non_diverse = model.generate( + ... **inputs, + ... max_length=100, + ... num_beams=4, + ... num_return_sequences=2, + ... ) + >>> summary_non_diverse = tokenizer.batch_decode(outputs_non_diverse, skip_special_tokens=True) + + >>> # With `diversity_penalty`, the resulting beams are much more diverse + >>> print(summary_non_diverse) + ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.', + 'the Solar System formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.'] + + >>> print(summaries_diverse) + ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.', + 'the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets. the rest of the objects are smaller objects, such as the five dwarf planets and small solar system bodies.'] + ``` + """ + + def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int): + if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0): + raise ValueError("`diversity_penalty` should be a float strictly larger than 0.") + self._diversity_penalty = diversity_penalty + if not isinstance(num_beams, int) or num_beams < 2: + raise ValueError("`num_beams` should be an integer strictly larger than 1.") + self._num_beams = num_beams + if not isinstance(num_beam_groups, int) or num_beam_groups < 2: + raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.") + if num_beam_groups > num_beams: + raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.") + self._num_sub_beams = num_beams // num_beam_groups + + def __call__( + self, + input_ids: torch.LongTensor, + scores: torch.FloatTensor, + current_tokens: torch.LongTensor, + beam_group_idx: int, + ) -> torch.FloatTensor: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + current_tokens (`torch.LongTensor` of shape `(batch_size)`): + Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other + beam groups in the current generation step. + beam_group_idx (`int`): + The index of the beam group currently being processed. + + Return: + `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: + The processed prediction scores. 
+ """ + # hamming diversity: penalise using same token in current group which was used in previous groups at + # the same time step + batch_size = current_tokens.shape[0] // self._num_beams + group_start_idx = beam_group_idx * self._num_sub_beams + group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams) + group_size = group_end_idx - group_start_idx + vocab_size = scores.shape[-1] + + if group_start_idx == 0: + return scores + + for batch_idx in range(batch_size): + # predicted tokens of last time step of previous groups + previous_group_tokens = current_tokens[ + batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx + ] + token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device) + scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency + + return scores + + +class ForcedBOSTokenLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that enforces the specified token as the first generated token. Used with encoder-decoder + models. + + Args: + bos_token_id (`int`): + The id of the token to force as the first generated token. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM + + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small") + >>> tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small") + + >>> inputs = tokenizer("Translate from English to German: I love cats.", return_tensors="pt") + + >>> # By default, it continues generating according to the model's logits + >>> outputs = model.generate(**inputs, max_new_tokens=10) + >>> print(tokenizer.batch_decode(outputs)[0]) + Ich liebe Kitty. + + >>> # We can use `forced_bos_token_id` to force the start of generation with an encoder-decoder model + >>> # (including forcing it to end straight away with an EOS token) + >>> outputs = model.generate(**inputs, max_new_tokens=10, forced_bos_token_id=tokenizer.eos_token_id) + >>> print(tokenizer.batch_decode(outputs)[0]) + + ``` + """ + + def __init__(self, bos_token_id: int): + self.bos_token_id = bos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + cur_len = input_ids.shape[-1] + if cur_len == 1: + num_tokens = scores.shape[1] + scores[:, [i for i in range(num_tokens) if i != self.bos_token_id]] = -float("inf") + scores[:, self.bos_token_id] = 0 + return scores + + +class ForcedEOSTokenLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. + + Args: + max_length (`int`): + The maximum length of the sequence to be generated. + eos_token_id (`Union[int, List[int]]`): + The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a + list to set multiple *end-of-sequence* tokens. 
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+
+    >>> inputs = tokenizer("A sequence: 1, 2, 3", return_tensors="pt")
+
+    >>> # By default, it continues generating according to the model's logits
+    >>> outputs = model.generate(**inputs, max_new_tokens=10)
+    >>> print(tokenizer.batch_decode(outputs)[0])
+    A sequence: 1, 2, 3, 4, 5, 6, 7, 8
+
+    >>> # `forced_eos_token_id` ensures the generation ends with an EOS token
+    >>> outputs = model.generate(**inputs, max_new_tokens=10, forced_eos_token_id=tokenizer.eos_token_id)
+    >>> print(tokenizer.batch_decode(outputs)[0])
+    A sequence: 1, 2, 3, 4, 5, 6, 7,<|endoftext|>
+    ```
+    """
+
+    def __init__(self, max_length: int, eos_token_id: Union[int, List[int]]):
+        self.max_length = max_length
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        self.eos_token_id = eos_token_id
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        cur_len = input_ids.shape[-1]
+        if cur_len == self.max_length - 1:
+            num_tokens = scores.shape[1]
+            scores[:, [i for i in range(num_tokens) if i not in self.eos_token_id]] = -float("inf")
+            for i in self.eos_token_id:
+                scores[:, i] = 0
+        return scores
+
+
+class InfNanRemoveLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that removes all `nan` and `inf` values to prevent the generation method from failing. Note
+    that this logits processor should only be used when necessary, since it can slow down the generation method.
+
+    This logits processor has no `generate` example, as there shouldn't be a correct combination of flags that warrants
+    its use.
+    """
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        # set all nan values to 0.0
+        scores[scores != scores] = 0.0
+
+        # set all +/-inf values to max/min possible value
+        scores[scores == float("inf")] = torch.finfo(scores.dtype).max
+        scores[scores == float("-inf")] = torch.finfo(scores.dtype).min
+
+        return scores
+
+
+class ExponentialDecayLengthPenalty(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that exponentially increases the score of the `eos_token_id` after `start_index` has been
+    reached. This allows generating shorter sequences without having a hard cutoff, allowing the `eos_token` to be
+    predicted in a meaningful position.
+
+    Args:
+        exponential_decay_length_penalty (`tuple(int, float)`):
+            This tuple shall consist of: `(start_index, decay_factor)`, where `start_index` indicates where the
+            penalty starts and `decay_factor` represents the factor of exponential decay.
+        eos_token_id (`Union[int, List[int]]`):
+            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+        input_ids_seq_length (`int`):
+            The length of the input sequence.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+    >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+
+    >>> text = "Just wanted to let you know, I"
+    >>> inputs = tokenizer(text, return_tensors="pt")
+
+    >>> # Let's consider that we want short sentences, so we limit `max_length=30`. However, we observe that the answer
+    >>> # tends to end abruptly.
+ >>> set_seed(1) + >>> outputs = model.generate(**inputs, do_sample=True, temperature=0.9, max_length=30, pad_token_id=50256) + >>> print(tokenizer.batch_decode(outputs)[0]) + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was + published in 2010. Although + + >>> # To promote the appearance of the EOS token at the right time, we add the `exponential_decay_length_penalty = + >>> # (start_index, decay_factor)`. Instead of cutting at max_tokens, the output comes to an end before and usually + >>> # with more meaning. What happens is that starting from `start_index` the EOS token score will be increased + >>> # by `decay_factor` exponentially. However, if you set a high decay factor, you may also end up with abruptly + >>> # ending sequences. + >>> set_seed(1) + >>> outputs = model.generate( + ... **inputs, + ... do_sample=True, + ... temperature=0.9, + ... max_length=30, + ... pad_token_id=50256, + ... exponential_decay_length_penalty=(15, 1.6), + ... ) + >>> print(tokenizer.batch_decode(outputs)[0]) + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network + which<|endoftext|> + + >>> # With a small decay factor, you will have a higher chance of getting a meaningful sequence. + >>> set_seed(1) + >>> outputs = model.generate( + ... **inputs, + ... do_sample=True, + ... temperature=0.9, + ... max_length=30, + ... pad_token_id=50256, + ... exponential_decay_length_penalty=(15, 1.01), + ... ) + >>> print(tokenizer.batch_decode(outputs)[0]) + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was + published in 2010.<|endoftext|> + ``` + """ + + def __init__( + self, + exponential_decay_length_penalty: Tuple[int, float], + eos_token_id: Union[int, List[int]], + input_ids_seq_length: int, + ): + self.regulation_start = exponential_decay_length_penalty[0] + input_ids_seq_length + self.regulation_factor = exponential_decay_length_penalty[1] + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + self.eos_token_id = eos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + cur_len = input_ids.shape[-1] + if cur_len > self.regulation_start: + for i in self.eos_token_id: + penalty_idx = cur_len - self.regulation_start + # To support negative logits we compute the penalty of the absolute value and add to the original logit + scores[:, i] = scores[:, i] + torch.abs(scores[:, i]) * (pow(self.regulation_factor, penalty_idx) - 1) + return scores + + +class LogitNormalization(LogitsProcessor, LogitsWarper): + r""" + [`LogitsWarper`] and [`LogitsProcessor`] for normalizing the scores using log-softmax. It's important to normalize + the scores during beam search, after applying the logits processors or warpers, since the search algorithm used in + this library doesn't do it (it only does it before, but they may need re-normalization) but it still supposes that + the scores are normalized when comparing the hypotheses. 
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+    >>> import torch
+
+    >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+
+    >>> inputs = tokenizer("A sequence: 1, 2, 3", return_tensors="pt")
+
+    >>> # By default, the scores are not normalized -- the sum of their exponentials is NOT a normalized probability
+    >>> # distribution, summing to 1
+    >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+    >>> print(torch.sum(torch.exp(outputs.scores[-1])))
+    tensor(816.3250)
+
+    >>> # Normalizing them may have a positive impact on beam methods, or when using the scores on your application
+    >>> outputs = model.generate(**inputs, renormalize_logits=True, return_dict_in_generate=True, output_scores=True)
+    >>> print(torch.sum(torch.exp(outputs.scores[-1])))
+    tensor(1.0000)
+    ```
+    """
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        scores = scores.log_softmax(dim=-1)
+        return scores
+
+
+class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor):
+    r"""
+    [`SuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
+    generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are
+    not generated at the beginning. Originally created for
+    [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper).
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
+    >>> from datasets import load_dataset
+
+    >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
+    >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
+    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+    >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
+
+    >>> # Whisper has `begin_suppress_tokens` set by default (= `[220, 50256]`). 50256 is the EOS token, so this means
+    >>> # it can't generate an EOS token in the first iteration, but it can in the others.
+    >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+    >>> print(outputs.scores[1][0, 50256])  # 1 (and not 0) is the first freely generated token
+    tensor(-inf)
+    >>> print(outputs.scores[-1][0, 50256])  # in other places we can see some probability mass for EOS
+    tensor(29.9010)
+
+    >>> # If we disable `begin_suppress_tokens`, we can generate EOS in the first iteration.
+    >>> outputs = model.generate(
+    ...     **inputs, return_dict_in_generate=True, output_scores=True, begin_suppress_tokens=None
+    ... )
+    >>> print(outputs.scores[1][0, 50256])
+    tensor(11.2027)
+    ```
+    """
+
+    def __init__(self, begin_suppress_tokens, begin_index):
+        self.begin_suppress_tokens = list(begin_suppress_tokens)
+        self.begin_index = begin_index
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        if input_ids.shape[1] == self.begin_index:
+            scores[:, self.begin_suppress_tokens] = -float("inf")
+
+        return scores
+
+
+class SuppressTokensLogitsProcessor(LogitsProcessor):
+    r"""
+    This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so
+    that they are not generated.
Originally created for + [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper). + + Examples: + + ```python + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") + + >>> # Whisper has a long list of suppressed tokens. For instance, in this case, the token 1 is suppressed by default. + >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True) + >>> print(outputs.scores[1][0, 1]) # 1 (and not 0) is the first freely generated token + tensor(-inf) + + >>> # If we disable `suppress_tokens`, we can generate it. + >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True, suppress_tokens=None) + >>> print(outputs.scores[1][0, 1]) + tensor(5.7738) + ``` + """ + + def __init__(self, suppress_tokens): + self.suppress_tokens = list(suppress_tokens) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + scores[:, self.suppress_tokens] = -float("inf") + return scores + + +class ForceTokensLogitsProcessor(LogitsProcessor): + r""" + This processor takes a list of pairs of integers which indicates a mapping from generation indices to token + indices that will be forced before generation. The processor will set their log probs to `inf` so that they are + sampled at their corresponding index. Originally created for + [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper). + + Examples: + ```python + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") + + >>> # This Whisper model forces the generation to start with `50362` at the first position by default, i.e. + >>> # `"forced_decoder_ids": [[1, 50362]]`. This means all other tokens are masked out. + >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True) + >>> print( + ... all(outputs.scores[0][0, i] == float("-inf") for i in range(processor.tokenizer.vocab_size) if i != 50362) + ... ) + True + >>> print(outputs.scores[0][0, 50362]) + tensor(0.) + + >>> # If we disable `forced_decoder_ids`, we stop seeing that effect + >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True, forced_decoder_ids=None) + >>> print( + ... all(outputs.scores[0][0, i] == float("-inf") for i in range(processor.tokenizer.vocab_size) if i != 50362) + ... 
) + False + >>> print(outputs.scores[0][0, 50362]) + tensor(19.3140) + ``` + """ + + def __init__(self, force_token_map: List[List[int]]): + self.force_token_map = dict(force_token_map) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + generation_idx = input_ids.shape[-1] + current_token = self.force_token_map.get(generation_idx, None) + if current_token is not None: + scores[:, :] = -float("inf") + scores[:, current_token] = 0 + return scores + + +class WhisperTimeStampLogitsProcessor(LogitsProcessor): + r""" + + [`LogitsProcessor`] that modifies the logits for the generation of timestamps in the transcription. When the input + tokens are at a specific threshold, the processor sets the scores to negative infinity. The processor makes sure + that timestamp tokens appear in pairs, by masking out the logits that would break this pairing pattern. This is + done to maintain the consistency and structure of generated timestamps. It also ensures that when the predicted + probability of sampling any of the timestamp token is greater than any individual non-timestamp token, those + non-timestamp logits are set to negative infinity. This is done to ensure the generation of timestamps over other + potential tokens. + + + See [the paper](https://arxiv.org/abs/2212.04356) for more information. + + Args: + generate_config (`GenerateConfig`): + The generate config used to generate the output. The following parameters are required: + eos_token_id (`int`, *optional*, defaults to 50257): + The id of the *end-of-sequence* token. + no_timestamps_token_id (`int`, *optional*, defaults to 50363): + The id of the `"<|notimestamps|>"` token. + max_initial_timestamp_index (`int`, *optional*, defaults to 1): + Used to set the maximum value of the initial timestamp. This is used to prevent the model from + predicting timestamps that are too far in the future. + _detect_timestamp_from_logprob (`bool`, *optional*): Whether timestamps can be predicted from logprobs over all timestamps. 
+ + Examples: + ``` python + >>> import torch + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration, GenerationConfig + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = processor(ds[3]["audio"]["array"], return_tensors="pt") + >>> input_features = inputs.input_features + + >>> #Displaying timestamps + >>> generated_ids = model.generate(inputs=input_features, return_timestamps=True) + >>> transcription = processor.batch_decode(generated_ids, decode_with_timestamps=True)[0] + >>> print("Transcription:", transcription) + Transcription: <|startoftranscript|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can<|6.44|><|6.44|> discover in it but little of rocky Ithaca.<|9.44|><|endoftext|> + + + >>> #No timestamps & change EOS: + >>> #This allows the user to select a specific token to terminate the sequence on, in this case it's the word "can"(460) + >>> model.generation_config.eos_token_id = 460 + >>> generated_ids = model.generate(inputs=input_features,return_timestamps=False) + >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> print("Transcription:", transcription) + Transcription: He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can + ``` + """ + + def __init__( + self, generate_config, _detect_timestamp_from_logprob: Optional[bool] = None + ): # support for the kwargs + self.eos_token_id = generate_config.eos_token_id + self.no_timestamps_token_id = generate_config.no_timestamps_token_id + self.timestamp_begin = generate_config.no_timestamps_token_id + 1 + + # this variable is mostly just used for testing + self._detect_timestamp_from_logprob = ( + _detect_timestamp_from_logprob + if _detect_timestamp_from_logprob is not None + else getattr(generate_config, "_detect_timestamp_from_logprob", True) + ) + + self.begin_index = ( + len(generate_config.forced_decoder_ids) + 1 if generate_config.forced_decoder_ids is not None else 1 + ) + self.max_initial_timestamp_index = getattr(generate_config, "max_initial_timestamp_index", None) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # suppress <|notimestamps|> which is handled by without_timestamps + scores[:, self.no_timestamps_token_id] = -float("inf") + + # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly + for k in range(input_ids.shape[0]): + sampled_tokens = input_ids[k, self.begin_index :] + seq = list(sampled_tokens.tolist()) + + last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin + penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin + + if last_was_timestamp: + if penultimate_was_timestamp: # has to be non-timestamp + scores[k, self.timestamp_begin :] = -float("inf") + else: # cannot be normal text tokens + scores[k, : self.eos_token_id] = -float("inf") + + timestamps = sampled_tokens[sampled_tokens.ge(self.timestamp_begin)] + if timestamps.numel() > 0: + # `timestamps` shouldn't decrease; forbid timestamp tokens smaller than the last + # The following lines of code are copied from: 
https://github.com/openai/whisper/pull/914/files#r1137085090
+            if last_was_timestamp and not penultimate_was_timestamp:
+                timestamp_last = timestamps[-1]
+            else:
+                # Avoid emitting <|0.00|> again
+                timestamp_last = timestamps[-1] + 1
+
+            scores[k, self.timestamp_begin : timestamp_last] = -float("inf")
+
+        # apply the `max_initial_timestamp` option
+        if input_ids.shape[1] == self.begin_index:
+            scores[:, : self.timestamp_begin] = -float("inf")
+
+            if self.max_initial_timestamp_index is not None:
+                last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
+                scores[:, last_allowed + 1 :] = -float("inf")
+
+        # if sum of probability over timestamps is above any other token, sample timestamp
+        logprobs = torch.nn.functional.log_softmax(scores.float(), dim=-1)
+        for k in range(input_ids.shape[0]):
+            timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1)
+            max_text_token_logprob = logprobs[k, : self.timestamp_begin].max()
+            if timestamp_logprob > max_text_token_logprob and self._detect_timestamp_from_logprob:
+                scores[k, : self.timestamp_begin] = -float("inf")
+
+        return scores
+
+
+class ClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] for classifier free guidance (CFG). The scores are split over the batch dimension,
+    where the first half corresponds to the conditional logits (predicted from the input prompt) and the second half
+    corresponds to the unconditional logits (predicted from an empty or 'null' prompt). The processor computes a
+    weighted average across the conditional and unconditional logits, parameterised by the `guidance_scale`.
+
+    See [the paper](https://arxiv.org/abs/2306.05284) for more information.
+
+
+
+    This logits processor is exclusively compatible with
+    [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen)
+
+
+
+    Args:
+        guidance_scale (float):
+            The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
+            Higher guidance scale encourages the model to generate samples that are more closely linked to the input
+            prompt, usually at the expense of poorer quality.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration
+
+    >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
+    >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+
+    >>> inputs = processor(
+    ...     text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
+    ...     padding=True,
+    ...     return_tensors="pt",
+    ... )
+    >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)
+    ```
+    """
+
+    def __init__(self, guidance_scale):
+        if guidance_scale > 1:
+            self.guidance_scale = guidance_scale
+        else:
+            raise ValueError(
+                "Require guidance scale >1 to use the classifier free guidance processor, got guidance scale "
+                f"{guidance_scale}."
+            )
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        # simple check to make sure we have compatible batch sizes between our
+        # logits scores (cond + uncond) and input ids (cond only)
+        if scores.shape[0] != 2 * input_ids.shape[0]:
+            raise ValueError(
+                f"Logits should have twice the batch size of the input ids, the first half of batches corresponding to "
+                f"the conditional inputs, and the second half of batches corresponding to the unconditional inputs. 
Got " + f"batch size {scores.shape[0]} for the logits and {input_ids.shape[0]} for the input ids." + ) + unguided_bsz = scores.shape[0] // 2 + cond_logits, uncond_logits = scores.split(unguided_bsz, dim=0) + scores = uncond_logits + (cond_logits - uncond_logits) * self.guidance_scale + return scores + + +class AlternatingCodebooksLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] enforcing alternated generation between the two codebooks of Bark. + + + + This logits processor is exclusivelly compatible with + [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark)'s fine submodel. See the model documentation + for examples. + + + + Args: + input_start_len (`int`): + The length of the initial input sequence. + semantic_vocab_size (`int`): + Vocabulary size of the semantic part, i.e number of tokens associated to the semantic vocabulary. + codebook_size (`int`): + Number of tokens associated to the codebook. + """ + + def __init__(self, input_start_len: int, semantic_vocab_size: int, codebook_size: int): + if not isinstance(input_start_len, int) or input_start_len < 0: + raise ValueError(f"`input_starting_length` has to be a non-negative integer, but is {input_start_len}") + + self.input_start_len = input_start_len + self.semantic_vocab_size = semantic_vocab_size + self.codebook_size = codebook_size + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + curr_len = input_ids.shape[-1] + + # even -> first codebook, odd -> second codebook + is_first_codebook = ((curr_len - self.input_start_len) % 2) == 0 + + if is_first_codebook: + scores[:, : self.semantic_vocab_size] = -float("inf") + scores[:, self.semantic_vocab_size + self.codebook_size :] = -float("inf") + else: + scores[:, : self.semantic_vocab_size + self.codebook_size] = -float("inf") + + return scores + + +class UnbatchedClassifierFreeGuidanceLogitsProcessor(LogitsProcessor): + r""" + Logits processor for Classifier-Free Guidance (CFG). The processors computes a weighted average across scores + from prompt conditional and prompt unconditional (or negative) logits, parameterized by the `guidance_scale`. + The unconditional scores are computed internally by prompting `model` with the `unconditional_ids` branch. + + See [the paper](https://arxiv.org/abs/2306.17806) for more information. + + Args: + guidance_scale (`float`): + The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale != 1`. + Higher guidance scale encourages the model to generate samples that are more closely linked to the input + prompt, usually at the expense of poorer quality. A value smaller than 1 has the opposite effect, while + making the negative prompt provided with negative_prompt_ids (if any) act as a positive prompt. + model (`PreTrainedModel`): + The model computing the unconditional scores. Supposedly the same as the one computing the conditional + scores. Both models must use the same tokenizer. + unconditional_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary for the unconditional branch. If unset, will default to + the last token of the prompt. + unconditional_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Attention mask for unconditional_ids. + use_cache (`bool`, *optional*, defaults to `True`): + Whether to cache key/values during the negative prompt forward pass. 
+ + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> inputs = tokenizer(["Today, a dragon flew over Paris, France,"], return_tensors="pt") + >>> out = model.generate(inputs["input_ids"], guidance_scale=1.5) + >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] + 'Today, a dragon flew over Paris, France, killing at least 50 people and injuring more than 100' + + >>> # with a negative prompt + >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt") + >>> out = model.generate(inputs["input_ids"], guidance_scale=2, negative_prompt_ids=neg_inputs["input_ids"]) + >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] + 'Today, a dragon flew over Paris, France, killing at least 130 people. French media reported that' + + >>> # with a positive prompt + >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt") + >>> out = model.generate(inputs["input_ids"], guidance_scale=0, negative_prompt_ids=neg_inputs["input_ids"]) + >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] + "Today, a dragon flew over Paris, France, and I'm very happy to be here. I" + ``` + """ + + def __init__( + self, + guidance_scale: float, + model, + unconditional_ids: Optional[torch.LongTensor] = None, + unconditional_attention_mask: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = True, + ): + self.guidance_scale = guidance_scale + self.model = model + self.unconditional_context = { + "input_ids": unconditional_ids, + "attention_mask": unconditional_attention_mask, + "use_cache": use_cache, + "past_key_values": None, + "first_pass": True, + } + + def get_unconditional_logits(self, input_ids): + if self.unconditional_context["first_pass"]: + if self.unconditional_context["input_ids"] is None: + self.unconditional_context["input_ids"] = input_ids[:, -1:] + if self.unconditional_context["attention_mask"] is None: + self.unconditional_context["attention_mask"] = torch.ones_like( + self.unconditional_context["input_ids"], dtype=torch.long + ) + input_ids = self.unconditional_context["input_ids"] + attention_mask = self.unconditional_context["attention_mask"] + self.unconditional_context["first_pass"] = False + else: + attention_mask = torch.cat( + [ + self.unconditional_context["attention_mask"], + torch.ones_like(input_ids[:, -1:], dtype=torch.long), + ], + dim=1, + ) + if not self.unconditional_context["use_cache"]: + input_ids = torch.cat([self.unconditional_context["input_ids"], input_ids[:, -1:]], dim=1) + else: + input_ids = input_ids[:, -1:] + self.unconditional_context["input_ids"] = input_ids + self.unconditional_context["attention_mask"] = attention_mask + + out = self.model( + input_ids, + attention_mask=attention_mask, + use_cache=self.unconditional_context["use_cache"], + past_key_values=self.unconditional_context["past_key_values"], + ) + self.unconditional_context["past_key_values"] = out.get("past_key_values", None) + + return out.logits + + def __call__(self, input_ids, scores): + scores = torch.nn.functional.log_softmax(scores, dim=-1) + if self.guidance_scale == 1: + return scores + + logits = self.get_unconditional_logits(input_ids) + + unconditional_logits = torch.nn.functional.log_softmax(logits[:, -1], dim=-1) + out = self.guidance_scale * (scores - unconditional_logits) + unconditional_logits + return out + + +class 
BarkEosPrioritizerLogitsProcessor(LogitsProcessor):
+    r"""This processor ensures that the EOS token is selected if its probability is greater than `min_eos_p`.
+
+    This logits processor is exclusively compatible with
+    [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark). See the model documentation for examples.
+
+    Args:
+        eos_token_id (`Union[int, List[int]]`):
+            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+        min_eos_p (`float`, *optional*):
+            Minimum end of speech threshold.
+    """
+
+    def __init__(self, eos_token_id: Union[int, List[int]], min_eos_p: float):
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        self.eos_token_id = eos_token_id
+        if min_eos_p is not None and min_eos_p <= 0:
+            raise ValueError(f"`min_eos_p` has to be a positive float, but is {min_eos_p}")
+        self.min_eos_p = min_eos_p
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        if self.min_eos_p:
+            probs = torch.nn.functional.softmax(scores.float(), dim=-1)
+            # create scores full of -inf except for the eos_token_id
+            early_stop_scores = torch.ones_like(scores) * -float("inf")
+            early_stop_scores[:, self.eos_token_id] = scores[:, self.eos_token_id]
+
+            do_early_stop = probs[:, self.eos_token_id] > self.min_eos_p
+            scores = torch.where(do_early_stop, early_stop_scores, scores)
+
+        return scores
diff --git a/modified/generation/stopping_criteria.py b/modified/generation/stopping_criteria.py
new file mode 100644
index 0000000000000000000000000000000000000000..18764ac94d9129f0f15ca833993589b1dee78ab1
--- /dev/null
+++ b/modified/generation/stopping_criteria.py
@@ -0,0 +1,151 @@
+import time
+import warnings
+from abc import ABC
+from copy import deepcopy
+from typing import Optional
+
+import torch
+
+from ..utils import add_start_docstrings, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
+            or scores for each vocabulary token after SoftMax. If this stopping criteria depends on the `scores` input,
+            make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`.
+        kwargs (`Dict[str, Any]`, *optional*):
+            Additional stopping criteria specific kwargs.
+
+    Return:
+        `bool`. `False` indicates we should continue, `True` indicates we should stop.
+
+"""
+
+
+class StoppingCriteria(ABC):
+    """Abstract base class for all stopping criteria that can be applied during generation.
+
+    If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True,
+    output_scores=True` to `generate`.
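+
+    Examples:
+
+    A minimal sketch of a custom criterion (illustrative only, not part of the library) that stops as soon as a
+    given token id has been generated:
+
+    ```python
+    >>> import torch
+
+    >>> class StopOnToken(StoppingCriteria):  # hypothetical example class
+    ...     def __init__(self, stop_token_id: int):
+    ...         self.stop_token_id = stop_token_id
+    ...
+    ...     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+    ...         # stop when any sequence in the batch contains the stop token
+    ...         return bool((input_ids == self.stop_token_id).any())
+    ```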
+    """
+
+    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        raise NotImplementedError("StoppingCriteria needs to be subclassed")
+
+
+class MaxLengthCriteria(StoppingCriteria):
+    """
+    This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
+    in mind that, for decoder-only transformers, this will include the initial prompted tokens.
+
+    Args:
+        max_length (`int`):
+            The maximum length that the output sequence can have in number of tokens.
+        max_position_embeddings (`int`, *optional*):
+            The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
+    """
+
+    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
+        self.max_length = max_length
+        self.max_position_embeddings = max_position_embeddings
+
+    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        cur_len = input_ids.shape[-1]
+        is_done = cur_len >= self.max_length
+        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
+            logger.warning_once(
+                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
+                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
+                "exceptions, performance degradation, or nothing at all."
+            )
+        return is_done
+
+
+class MaxNewTokensCriteria(StoppingCriteria):
+    """
+    This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in
+    mind that, for decoder-only transformers, this will **not** include the initial prompted tokens. This is very
+    close to `MaxLengthCriteria` but ignores the number of initial tokens.
+
+    Args:
+        start_length (`int`):
+            The number of initial tokens.
+        max_new_tokens (`int`):
+            The maximum number of tokens to generate.
+    """
+
+    def __init__(self, start_length: int, max_new_tokens: int):
+        warnings.warn(
+            "The class `MaxNewTokensCriteria` is deprecated. "
+            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
+            "with `max_length = start_length + max_new_tokens` instead.",
+            FutureWarning,
+        )
+        self.start_length = start_length
+        self.max_new_tokens = max_new_tokens
+        self.max_length = start_length + max_new_tokens
+
+    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        return input_ids.shape[-1] >= self.max_length
+
+
+class MaxTimeCriteria(StoppingCriteria):
+    """
+    This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
+    time will start being counted when you initialize this class. You can override this by passing an
+    `initial_timestamp`.
+
+    Args:
+        max_time (`float`):
+            The maximum allowed time in seconds for the generation.
+        initial_timestamp (`float`, *optional*, defaults to `time.time()`):
+            The start of the generation allowed time.
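+
+    Examples:
+
+    A minimal sketch (illustrative only) capping generation at roughly two seconds of wall-clock time:
+
+    ```python
+    >>> criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
+    >>> # `criteria` can then be passed to `generate` via its `stopping_criteria` argument
+    ```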
+    """
+
+    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
+        self.max_time = max_time
+        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
+
+    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        return time.time() - self.initial_timestamp > self.max_time
+
+
+class StoppingCriteriaList(list):
+    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        return any(criteria(input_ids, scores) for criteria in self)
+
+    @property
+    def max_length(self) -> Optional[int]:
+        for stopping_criterium in self:
+            if isinstance(stopping_criterium, MaxLengthCriteria):
+                return stopping_criterium.max_length
+            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
+                return stopping_criterium.max_length
+        return None
+
+
+def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
+    stopping_max_length = stopping_criteria.max_length
+    new_stopping_criteria = deepcopy(stopping_criteria)
+    if stopping_max_length is not None and stopping_max_length != max_length:
+        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
+    elif stopping_max_length is None:
+        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
+    return new_stopping_criteria
diff --git a/modified/generation/streamers.py b/modified/generation/streamers.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b299db5da6982e5f767fb4e8196dbde476dff9e
--- /dev/null
+++ b/modified/generation/streamers.py
@@ -0,0 +1,227 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from queue import Queue
+from typing import TYPE_CHECKING, Optional
+
+
+if TYPE_CHECKING:
+    from ..models.auto import AutoTokenizer
+
+
+class BaseStreamer:
+    """
+    Base class from which `.generate()` streamers should inherit.
+    """
+
+    def put(self, value):
+        """Function that is called by `.generate()` to push new tokens"""
+        raise NotImplementedError()
+
+    def end(self):
+        """Function that is called by `.generate()` to signal the end of generation"""
+        raise NotImplementedError()
+
+
+class TextStreamer(BaseStreamer):
+    """
+    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
+
+    The API for the streamer classes is still under development and may change in the future.
+
+    Parameters:
+        tokenizer (`AutoTokenizer`):
+            The tokenizer used to decode the tokens.
+        skip_prompt (`bool`, *optional*, defaults to `False`):
+            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
+        decode_kwargs (`dict`, *optional*):
+            Additional keyword arguments to pass to the tokenizer's `decode` method.
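+
+    The streamer buffers token ids and only prints text up to the last complete word; the buffer is flushed on
+    newlines and after CJK characters (see `put` below).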
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+
+    >>> tok = AutoTokenizer.from_pretrained("gpt2")
+    >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+    >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
+    >>> streamer = TextStreamer(tok)
+
+    >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
+    >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
+    An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
+    ```
+    """
+
+    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
+        self.tokenizer = tokenizer
+        self.skip_prompt = skip_prompt
+        self.decode_kwargs = decode_kwargs
+
+        # variables used in the streaming process
+        self.token_cache = []
+        self.print_len = 0
+        self.next_tokens_are_prompt = True
+
+    def put(self, value):
+        """
+        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
+        """
+        if len(value.shape) > 1 and value.shape[0] > 1:
+            raise ValueError("TextStreamer only supports batch size 1")
+        elif len(value.shape) > 1:
+            value = value[0]
+
+        if self.skip_prompt and self.next_tokens_are_prompt:
+            self.next_tokens_are_prompt = False
+            return
+
+        # Add the new token to the cache and decode the entire thing.
+        self.token_cache.extend(value.tolist())
+        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
+
+        # After the symbol for a new line, we flush the cache.
+        if text.endswith("\n"):
+            printable_text = text[self.print_len :]
+            self.token_cache = []
+            self.print_len = 0
+        # If the last token is a CJK character, we print the characters.
+        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
+            printable_text = text[self.print_len :]
+            self.print_len += len(printable_text)
+        # Otherwise, print until the last space char (simple heuristic to avoid printing incomplete words,
+        # which may change with the subsequent token -- there are probably smarter ways to do this!)
+        else:
+            printable_text = text[self.print_len : text.rfind(" ") + 1]
+            self.print_len += len(printable_text)
+
+        self.on_finalized_text(printable_text)
+
+    def end(self):
+        """Flushes any remaining cache and prints a newline to stdout."""
+        # Flush the cache, if it exists
+        if len(self.token_cache) > 0:
+            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
+            printable_text = text[self.print_len :]
+            self.token_cache = []
+            self.print_len = 0
+        else:
+            printable_text = ""
+
+        self.next_tokens_are_prompt = True
+        self.on_finalized_text(printable_text, stream_end=True)
+
+    def on_finalized_text(self, text: str, stream_end: bool = False):
+        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
+        print(text, flush=True, end="" if not stream_end else None)
+
+    def _is_chinese_char(self, cp):
+        """Checks whether CP is the codepoint of a CJK character."""
+        # This defines a "chinese character" as anything in the CJK Unicode block:
+        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+        #
+        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+        # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+        if (
+            (cp >= 0x4E00 and cp <= 0x9FFF)
+            or (cp >= 0x3400 and cp <= 0x4DBF)
+            or (cp >= 0x20000 and cp <= 0x2A6DF)
+            or (cp >= 0x2A700 and cp <= 0x2B73F)
+            or (cp >= 0x2B740 and cp <= 0x2B81F)
+            or (cp >= 0x2B820 and cp <= 0x2CEAF)
+            or (cp >= 0xF900 and cp <= 0xFAFF)
+            or (cp >= 0x2F800 and cp <= 0x2FA1F)
+        ):
+            return True
+
+        return False
+
+
+class TextIteratorStreamer(TextStreamer):
+    """
+    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
+    useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an
+    interactive Gradio demo).
+
+    The API for the streamer classes is still under development and may change in the future.
+
+    Parameters:
+        tokenizer (`AutoTokenizer`):
+            The tokenizer used to decode the tokens.
+        skip_prompt (`bool`, *optional*, defaults to `False`):
+            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
+        timeout (`float`, *optional*):
+            The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
+            in `.generate()`, when it is called in a separate thread.
+        decode_kwargs (`dict`, *optional*):
+            Additional keyword arguments to pass to the tokenizer's `decode` method.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+    >>> from threading import Thread
+
+    >>> tok = AutoTokenizer.from_pretrained("gpt2")
+    >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+    >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
+    >>> streamer = TextIteratorStreamer(tok)
+
+    >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
+    >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
+    >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    >>> thread.start()
+    >>> generated_text = ""
+    >>> for new_text in streamer:
+    ...     generated_text += new_text
+    >>> generated_text
+    'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
+    ```
+    """
+
+    def __init__(
+        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
+    ):
+        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
+        self.text_queue = Queue()
+        self.stop_signal = None
+        self.timeout = timeout
+
+    def on_finalized_text(self, text: str, stream_end: bool = False):
+        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
+        self.text_queue.put(text, timeout=self.timeout)
+        if stream_end:
+            self.text_queue.put(self.stop_signal, timeout=self.timeout)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        value = self.text_queue.get(timeout=self.timeout)
+        if value == self.stop_signal:
+            raise StopIteration()
+        else:
+            return value
diff --git a/modified/generation/tf_logits_process.py b/modified/generation/tf_logits_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc9799b7ab39f19610faf3ac684e3cb287c95678
--- /dev/null
+++ b/modified/generation/tf_logits_process.py
@@ -0,0 +1,591 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Tuple + +import numpy as np +import tensorflow as tf + +from ..tf_utils import stable_softmax +from ..utils import add_start_docstrings +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam + search or log softmax for each vocabulary token when using beam search. + cur_len (`int`): + The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length + is the maximum length generate can produce, and we need to know which of its tokens are valid. + kwargs (`Dict[str, Any]`, *optional*): + Additional logits processor specific kwargs. + + Return: + `tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. +""" + + +class TFLogitsProcessor: + """Abstract base class for all logit processors that can be applied during generation.""" + + @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + """TF method for processing logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class TFLogitsWarper: + """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" + + @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + """TF method for warping logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class TFLogitsProcessorList(list): + """ + This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor. + This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the + inputs. + """ + + @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor: + for processor in self: + function_args = inspect.signature(processor.__call__).parameters + if len(function_args) > 3: + if not all(arg in kwargs for arg in list(function_args.keys())[2:]): + raise ValueError( + f"Make sure that all the required parameters: {list(function_args.keys())} for " + f"{processor.__class__} are passed to the logits processor." 
+                    )
+                scores = processor(input_ids, scores, cur_len, **kwargs)
+            else:
+                scores = processor(input_ids, scores, cur_len)
+        return scores
+
+
+class TFTemperatureLogitsWarper(TFLogitsWarper):
+    r"""
+    [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution).
+
+    Args:
+        temperature (`float`):
+            The value used to modulate the logits distribution.
+    """
+
+    def __init__(self, temperature: float):
+        if not isinstance(temperature, float) or not (temperature > 0):
+            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
+
+        self.temperature = temperature
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        scores = scores / self.temperature
+        return scores
+
+
+class TFTopKLogitsWarper(TFLogitsWarper):
+    r"""
+    [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
+
+    Args:
+        top_k (`int`):
+            The number of highest probability vocabulary tokens to keep for top-k-filtering.
+        filter_value (`float`, *optional*, defaults to -inf):
+            All filtered values will be set to this float value.
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens that cannot be filtered.
+    """
+
+    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+        if not isinstance(top_k, int) or top_k <= 0:
+            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
+
+        self.top_k = max(top_k, min_tokens_to_keep)
+        self.filter_value = filter_value
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        top_k = min(self.top_k, scores.shape[-1])  # Safety check
+        # Boolean mask containing all tokens with a probability less than the last token of the top-k
+        indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]
+        next_scores = tf.where(indices_to_remove, self.filter_value, scores)
+        return next_scores
+
+
+class TFTopPLogitsWarper(TFLogitsWarper):
+    """
+    [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off.
+
+    Args:
+        top_p (`float`):
+            If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+            higher are kept for generation.
+        filter_value (`float`, *optional*, defaults to -inf):
+            All filtered values will be set to this float value.
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens that cannot be filtered.
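+
+    Examples:
+
+    A minimal sketch (illustrative only) of warping a toy score tensor so that only the most probable tokens
+    summing to 0.9 survive:
+
+    ```python
+    >>> import tensorflow as tf
+
+    >>> warper = TFTopPLogitsWarper(top_p=0.9)
+    >>> scores = tf.constant([[0.0, 1.0, 2.0, 5.0]])
+    >>> warped = warper(input_ids=None, scores=scores, cur_len=1)  # low-probability tokens become -inf
+    ```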
+    """
+
+    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
+            raise ValueError(f"`top_p` has to be a float between 0 and 1, but is {top_p}")
+        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
+
+        self.top_p = top_p
+        self.filter_value = filter_value
+        self.min_tokens_to_keep = min_tokens_to_keep
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1])
+
+        mask_scores = tf.fill(scores.shape, self.filter_value)
+        cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1)
+        score_mask = cumulative_probs < self.top_p
+
+        # Also include the token that is higher than top_p (the first false = shift and insert a True on the left)
+        score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1)
+
+        # Ensure min tokens to keep
+        score_mask = tf.concat(
+            (
+                tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool),
+                score_mask[:, self.min_tokens_to_keep :],
+            ),
+            axis=-1,
+        )
+
+        # Mask the values that do not fit the criteria
+        topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)
+
+        # Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size)
+        # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we
+        # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`)
+        scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]])
+        scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1)
+        next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape)
+
+        return next_scores
+
+
+class TFMinLengthLogitsProcessor(TFLogitsProcessor):
+    r"""
+    [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
+
+    Args:
+        min_length (`int`):
+            The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
+        eos_token_id (`int`):
+            The id of the *end-of-sequence* token.
+    """
+
+    def __init__(self, min_length: int, eos_token_id: int):
+        if not isinstance(min_length, int) or min_length < 0:
+            raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}")
+
+        if not isinstance(eos_token_id, int) or eos_token_id < 0:
+            raise ValueError(f"`eos_token_id` has to be a non-negative integer, but is {eos_token_id}")
+
+        self.min_length = min_length
+        self.eos_token_id = eos_token_id
+
+    def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:
+        eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id
+        scores = tf.where(eos_token_id_mask, float("-inf"), scores)
+        return scores
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        # applies eos token masking while the current length is below `min_length`
+        scores = tf.cond(
+            tf.less(cur_len, self.min_length),
+            lambda: self._apply_eos_token_mask(scores),
+            lambda: tf.identity(scores),
+        )
+        return scores
+
+
+class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor):
+    r"""
+    [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences.
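+    Tokens that already appear in `input_ids` have their scores reduced: a positive logit is divided by `penalty`
+    and a negative logit is multiplied by `penalty` (see `_create_score_penalties` below).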
+
+    Args:
+        penalty (`float`):
+            The parameter for repetition penalty. 1.0 means no penalty. See [this
+            paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+    """
+
+    def __init__(self, penalty: float):
+        if not isinstance(penalty, float) or not (penalty > 0):
+            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+
+        self.penalty = penalty
+
+    def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
+        # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown
+        # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has
+        # the same token multiple times.
+
+        # Gathers the penalties to apply
+        logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1)
+        logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties)
+        logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties)
+
+        # Scatters the penalties
+        token_penalties = tf.ones(logits.shape)
+        batch_size = input_ids.shape[0]
+        seq_len = tf.shape(input_ids)[1]  # the sequence length has dynamic size, hence the dynamic shape
+        indexable_prev_input_ids = tf.concat(
+            (
+                tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1),
+                tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1),
+            ),
+            axis=1,
+        )
+        token_penalties = tf.tensor_scatter_nd_update(
+            token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1])
+        )
+        return token_penalties
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores)
+
+        scores = tf.math.multiply(scores, score_penalties)
+
+        return scores
+
+
+class TFNoBadWordsLogitsProcessor(TFLogitsProcessor):
+    """
+    [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled.
+
+    Args:
+        bad_words_ids (`List[List[int]]`):
+            List of list of token ids that are not allowed to be generated. In order to get the tokens of the words
+            that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing
+            the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space`
+            argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from
+            `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).
+        eos_token_id (`int`):
+            The id of the *end-of-sequence* token.
+    """
+
+    def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int):
+        if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
+            raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
+        if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
+            raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
+        if any(
+            any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
+            for bad_word_ids in bad_words_ids
+        ):
+            raise ValueError(
+                f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
+            )
+
+        # stores the information about bad words in three tensors:
+        # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons
+        self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1)
+        # 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons
+        bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids]
+        if any(word_len == 0 for word_len in bad_word_seqs_len):
+            raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
+        self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32)
+        # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned
+        self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids])
+
+    def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor:
+        def _tokens_match(bad_word_seq_number):
+            def _len_one():
+                # If the bad sequence only has one token, always mask it
+                return tf.cond(
+                    tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1),
+                    lambda: tf.ones((), dtype=tf.bool),
+                    _len_greater_than_cur_len,
+                )
+
+            def _len_greater_than_cur_len():
+                # Otherwise, if the bad sequence is longer than the current length they can't ever match
+                return tf.cond(
+                    tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]),
+                    lambda: tf.zeros((), dtype=tf.bool),
+                    _match_found,
+                )
+
+            def _match_found():
+                # Finally, runs the actual comparison. Can only be called if the previous comparisons do not yield
+                # an answer (otherwise we get indexing exceptions)
+                compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1
+                return tf.cond(
+                    tf.math.reduce_all(
+                        tf.math.equal(
+                            row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len]
+                        )
+                    ),
+                    lambda: tf.ones((), dtype=tf.bool),
+                    lambda: tf.zeros((), dtype=tf.bool),
+                )
+
+            match = _len_one()
+            return match
+
+        # Compares the current row against all bad word sequences, obtaining a mask with the matches.
+        match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool)
+        row_banned_tokens = self.seq_forbidden_tokens[match_mask]
+        return row_banned_tokens
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        # We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous
+        # `input_ids`, they may have a different length for each row, and they may even be empty for some rows.
+        # To remain simple and XLA-compatible, we work on a per-row fashion.
+        # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes
+        # a frequent choke point. (make `cur_len` a tensor?)
+        def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor:
+            row_input_ids, row_score = row_inputs
+            banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len])
+            banned_tokens_mask = tf.scatter_nd(
+                indices=tf.expand_dims(banned_tokens, axis=-1),
+                updates=tf.ones_like(banned_tokens, dtype=tf.bool),
+                shape=row_score.shape,
+            )
+            row_score = tf.where(banned_tokens_mask, -float("inf"), row_score)
+            return row_score
+
+        scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32)
+        return scores
+
+
+class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor):
+    r"""
+    [`TFLogitsProcessor`] that enforces no repetition of n-grams.
See + [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). + + Args: + ngram_size (`int`): + All ngrams of size `ngram_size` can only occur once. + """ + + def __init__(self, ngram_size: int): + if not isinstance(ngram_size, int) or ngram_size <= 0: + raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") + self.ngram_size = ngram_size + + def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len): + # Copied from fairseq for no_repeat_ngram in beam_search + if cur_len + 1 < self.ngram_size: + # return no banned tokens if we haven't generated ngram_size tokens yet + return [[] for _ in range(num_hypos)] + generated_ngrams = [{} for _ in range(num_hypos)] + prev_input_ids = input_ids[:, :cur_len] + for idx in range(num_hypos): + gen_tokens = prev_input_ids[idx].numpy().tolist() + generated_ngram = generated_ngrams[idx] + for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]): + prev_ngram_tuple = tuple(ngram[:-1]) + generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] + + def _get_generated_ngrams(hypo_idx): + # Before decoding the next token, prevent decoding of ngrams that have already appeared + start_idx = cur_len + 1 - self.ngram_size + ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) + return generated_ngrams[hypo_idx].get(ngram_idx, []) + + banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] + + return banned_tokens + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + # TODO (joao): enable XLA on this logits processor. See discussion and attempts in + # https://github.com/huggingface/transformers/pull/16974 + if not tf.executing_eagerly(): + raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.") + + batch_size, vocab_size = scores.shape + banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len) + + # create banned_tokens boolean mask + banned_tokens_indices_mask = [] + for banned_tokens_slice in banned_tokens: + banned_tokens_indices_mask.append( + [True if token in banned_tokens_slice else False for token in range(vocab_size)] + ) + + scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores) + + return scores + + +class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor): + r""" + [`TFLogitsProcessor`] that enforces the specified token as the first generated token. + + Args: + bos_token_id (`int`): + The id of the token to force as the first generated token. 
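+
+    Examples:
+
+    A minimal sketch (illustrative only): with `bos_token_id=0`, at the first generation step every column except
+    column 0 is set to `-inf`:
+
+    ```python
+    >>> import tensorflow as tf
+
+    >>> processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=0)
+    >>> scores = tf.zeros((1, 4))
+    >>> out = processor(input_ids=None, scores=scores, cur_len=1)  # only column 0 keeps a finite score
+    ```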
+    """
+
+    def __init__(self, bos_token_id: int):
+        if bos_token_id < 0:
+            raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}")
+        self.bos_token_id = bos_token_id
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        if cur_len == 1:
+            batch_size, num_tokens = scores.shape
+            # sets the score to 0 in the bos_token_id column
+            scores = tf.zeros((batch_size, 1))
+            # sets the score to -inf everywhere else
+            if self.bos_token_id > 0:
+                scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1)
+            if self.bos_token_id < (num_tokens - 1):
+                scores = tf.concat(
+                    (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))),
+                    axis=-1,
+                )
+        return scores
+
+
+class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
+    r"""
+    [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
+
+    Args:
+        max_length (`int`):
+            The maximum length of the sequence to be generated.
+        eos_token_id (`int`):
+            The id of the token to force as the last generated token when `max_length` is reached.
+    """
+
+    def __init__(self, max_length: int, eos_token_id: int):
+        self.max_length = max_length
+        if eos_token_id < 0:
+            raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}")
+        self.eos_token_id = eos_token_id
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        if cur_len == self.max_length - 1:
+            batch_size, num_tokens = scores.shape
+            # sets the score to 0 in the eos_token_id column
+            scores = tf.zeros((batch_size, 1))
+            # sets the score to -inf everywhere else
+            if self.eos_token_id > 0:
+                scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1)
+            if self.eos_token_id < (num_tokens - 1):
+                scores = tf.concat(
+                    (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))),
+                    axis=-1,
+                )
+        return scores
+
+
+class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
+    r"""
+    [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
+    generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are
+    not sampled at the beginning of the generation.
+    """
+
+    def __init__(self, begin_suppress_tokens, begin_index):
+        self.begin_suppress_tokens = list(begin_suppress_tokens)
+        self.begin_index = begin_index
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        scores = tf.cond(
+            tf.equal(cur_len, self.begin_index),
+            lambda: tf.tensor_scatter_nd_update(
+                scores,
+                indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens],
+                updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))],
+            ),
+            lambda: scores,
+        )
+        return scores
+
+
+class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):
+    r"""This processor can be used to suppress a list of tokens.
The processor will set their log probs to `-inf` so that they
+    are not sampled."""
+
+    def __init__(self, suppress_tokens):
+        self.suppress_tokens = list(suppress_tokens)
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        scores = tf.tensor_scatter_nd_update(
+            scores,
+            indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens],
+            updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))],
+        )
+        return scores
+
+
+class TFForceTokensLogitsProcessor(TFLogitsProcessor):
+    r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
+    indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to
+    `-inf` so that they are sampled at their corresponding index."""
+
+    def __init__(self, force_token_map: List[List[int]]):
+        force_token_map = dict(force_token_map)
+        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
+        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
+        # Indices without forced tokens will have a negative value.
+        force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
+        for index, token in force_token_map.items():
+            if token is not None:
+                force_token_array[index] = token
+        self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)
+
+    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+        def _force_token(generation_idx):
+            batch_size = scores.shape[0]
+            current_token = self.force_token_array[generation_idx]
+
+            new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf")
+            indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1)
+            updates = tf.zeros((batch_size,), dtype=scores.dtype)
+            new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates)
+            return new_scores
+
+        scores = tf.cond(
+            tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]),
+            # If the current length is geq than the length of force_token_array, the processor does nothing.
+            lambda: tf.identity(scores),
+            # Otherwise, it may force a certain token.
+            lambda: tf.cond(
+                tf.greater_equal(self.force_token_array[cur_len], 0),
+                # Only valid (non-negative) tokens are forced
+                lambda: _force_token(cur_len),
+                # Otherwise, the processor does nothing.
+                lambda: scores,
+            ),
+        )
+        return scores
diff --git a/modified/generation/tf_utils.py b/modified/generation/tf_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..325e7e1cba2720876530e4aee8cee091b7039333
--- /dev/null
+++ b/modified/generation/tf_utils.py
@@ -0,0 +1,3187 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import copy +import inspect +import warnings +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf +from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice + +from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput +from ..models.auto import ( + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, +) +from ..tf_utils import shape_list, stable_softmax +from ..utils import ModelOutput, logging +from .configuration_utils import GenerationConfig +from .tf_logits_process import ( + TFForcedBOSTokenLogitsProcessor, + TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, + TFLogitsProcessorList, + TFMinLengthLogitsProcessor, + TFNoBadWordsLogitsProcessor, + TFNoRepeatNGramLogitsProcessor, + TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, + TFTemperatureLogitsWarper, + TFTopKLogitsWarper, + TFTopPLogitsWarper, +) + + +logger = logging.get_logger(__name__) + + +@dataclass +class TFGreedySearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using greedy search. + + + Args: + sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFGreedySearchEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention + weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the + encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + + Args: + sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. 
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFSampleDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using sampling. + + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. 
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFSampleEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of + the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states + attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size*num_return_sequences, + num_heads, sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size*num_return_sequences, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. 
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFBeamSearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using beam search. + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log + softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this + beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFBeamSearchEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using beam search. 
Hidden states and attention weights
+    of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states
+    attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
+
+    Args:
+        sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+            The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+            if all batches finished early due to the `eos_token_id`.
+        sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Final beam scores of the generated `sequences`.
+        scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
+            softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
+            beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
+            with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+        beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+            `(batch_size*num_return_sequences, sequence_length)`.
+        encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+            Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+        encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+            `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
+        decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
+            sequence_length)`.
+        cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+        decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
+ """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFBeamSampleDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using beam sample. + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log + softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this + beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFBeamSampleEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention + weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the + encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. 
+ sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
+ softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
+ beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size*num_beams, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ sequences_scores: Optional[tf.Tensor] = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ beam_indices: Optional[tf.Tensor] = None
+ encoder_attentions: Optional[Tuple[tf.Tensor]] = None
+ encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+ @dataclass
+ class TFContrastiveSearchDecoderOnlyOutput(ModelOutput):
+ """
+ Base class for outputs of decoder-only generation models using contrastive search.
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+ @dataclass
+ class TFContrastiveSearchEncoderDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention
+ weights of the encoder (respectively the decoder) can be accessed via the encoder_attentions and the
+ encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes).
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput] +TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput] +TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput] +TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput] +TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput] +TFGenerateOutput = Union[ + TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput +] + + +class TFGenerationMixin: + """ + A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`]. + + The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for: + - *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and + `do_sample=False` + - *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and + `top_k>1` + - *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1` + + You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To + learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). + """ + + _seed_generator = None + + @property + def seed_generator(self): + warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning) + if self._seed_generator is None: + self._seed_generator = tf.random.Generator.from_non_deterministic_state() + return self._seed_generator + + supports_xla_generation = True + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." + ) + + def compute_transition_scores( + self, + sequences: tf.Tensor, + scores: Tuple[tf.Tensor], + beam_indices: Optional[tf.Tensor] = None, + normalize_logits: bool = False, + ) -> tf.Tensor: + """ + Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was + used). 
This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
+
+ Parameters:
+ sequences (`tf.Tensor`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
+ shorter if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)`):
+ Transition scores for each vocabulary token at each generation step. Beam transition scores consisting
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens. Tuple of
+ `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each
+ tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` was used at
+ generate-time.
+ normalize_logits (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
+
+ Return:
+ `tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
+ the transition scores (logits).
+
+ Examples:
+
+ ```python
+ >>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM
+ >>> import numpy as np
+
+ >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2")
+ >>> tokenizer.pad_token_id = tokenizer.eos_token_id
+ >>> inputs = tokenizer(["Today is"], return_tensors="tf")
+
+ >>> # Example 1: Print the scores for each token generated with Greedy Search
+ >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
+ >>> transition_scores = model.compute_transition_scores(
+ ... outputs.sequences, outputs.scores, normalize_logits=True
+ ... )
+ >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
+ >>> # encoder-decoder models, like BART or T5.
+ >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
+ >>> generated_tokens = outputs.sequences[:, input_length:]
+ >>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
+ ... # | token | token string | logits | probability
+ ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
+ | 262 | the | -1.413 | 24.33%
+ | 1110 | day | -2.609 | 7.36%
+ | 618 | when | -2.009 | 13.41%
+ | 356 | we | -1.859 | 15.58%
+ | 460 | can | -2.508 | 8.14%
+
+ >>> # Example 2: Reconstruct the sequence scores from Beam Search
+ >>> outputs = model.generate(
+ ... **inputs,
+ ... max_new_tokens=5,
+ ... num_beams=4,
+ ... num_return_sequences=4,
+ ... return_dict_in_generate=True,
+ ... output_scores=True,
+ ... )
+ >>> transition_scores = model.compute_transition_scores(
+ ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
+ ... )
+ >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
+ >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
+ >>> # use case, you might want to recompute it with `normalize_logits=True`.
+ >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1)
+ >>> length_penalty = model.generation_config.length_penalty
+ >>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty)
+ >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
+ True
+ ```"""
+ # 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
+ # to a beam search approach where the first (and only) beam is always selected
+ if beam_indices is None:
+ beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)])
+
+ # 2. reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being
+ # seq_len - input_length
+ scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0))
+ scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1]))
+
+ # 3. Optionally normalize the logits (across the vocab dimension)
+ if normalize_logits:
+ scores = tf.nn.log_softmax(scores, axis=1)
+
+ # 4. cut beam_indices to longest beam length
+ beam_indices_mask = beam_indices < 0
+ max_beam_length = tf.math.reduce_max(
+ tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1)
+ )
+ beam_indices = beam_indices[:, -max_beam_length:]
+ beam_indices_mask = beam_indices_mask[:, -max_beam_length:]
+
+ # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
+ beam_indices = tf.where(beam_indices_mask, 0, beam_indices)
+
+ # 6. Define which indices contributed to scores
+ cut_idx = sequences.shape[-1] - max_beam_length
+ token_indices = sequences[:, cut_idx:]
+ gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape)
+ indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1)
+
+ # 7. Compute scores
+ transition_scores = tf.gather_nd(scores, indices)
+
+ # 8. Mask out transition_scores of beams that stopped early
+ transition_scores = tf.where(beam_indices_mask, 0, transition_scores)
+
+ return transition_scores
+
+ def _validate_model_class(self):
+ """
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
+ right class to use.
+ """
+ if not self.can_generate():
+ generate_compatible_mappings = [
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ ]
+ generate_compatible_classes = set()
+ for model_mapping in generate_compatible_mappings:
+ supported_models = model_mapping.get(type(self.config), default=None)
+ if supported_models is not None:
+ generate_compatible_classes.add(supported_models.__name__)
+ exception_message = (
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
+ "it doesn't have a language model head."
+ )
+ if generate_compatible_classes:
+ exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
+ raise TypeError(exception_message)
+
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
+ # Excludes arguments that are handled before calling any model function
+ if self.config.is_encoder_decoder:
+ for key in ["decoder_input_ids"]:
+ model_kwargs.pop(key, None)
+
+ unused_model_args = []
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
+ if "kwargs" in model_args or "model_kwargs" in model_args:
+ model_args |= set(inspect.signature(self.call).parameters)
+ for key, value in model_kwargs.items():
+ if value is not None and key not in model_args:
+ unused_model_args.append(key)
+
+ if unused_model_args:
+ raise ValueError(
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
+ " generate arguments will also show up in this list)"
+ )
+
+ def generate(
+ self,
+ inputs: Optional[tf.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ seed=None,
+ **kwargs,
+ ) -> Union[TFGenerateOutput, tf.Tensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head.
+
+ <Tip warning={true}>
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](../generation_strategies).
+
+ </Tip>
+
+ Parameters:
+ inputs (`tf.Tensor` of varying shape depending on the modality, *optional*):
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
+ should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ seed (`List[int]`, *optional*):
+ Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
+ `seed` argument from stateless functions in `tf.random`.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `call` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when
+ `config.return_dict_in_generate=True`) or a `tf.Tensor`.
+
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.TFGreedySearchDecoderOnlyOutput`],
+ - [`~generation.TFSampleDecoderOnlyOutput`],
+ - [`~generation.TFBeamSearchDecoderOnlyOutput`],
+ - [`~generation.TFBeamSampleDecoderOnlyOutput`]
+
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.TFGreedySearchEncoderDecoderOutput`],
+ - [`~generation.TFSampleEncoderDecoderOutput`],
+ - [`~generation.TFBeamSearchEncoderDecoderOutput`],
+ - [`~generation.TFBeamSampleEncoderDecoderOutput`]
+
+ """
+
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
+ self._validate_model_class()
+
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
+ if generation_config is None:
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
+ # two conditions must be met:
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
+ # 2) the generation config must have seen no modification since its creation (the hash is the same).
+ if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
+ self.generation_config
+ ):
+ new_generation_config = GenerationConfig.from_model_config(self.config)
+ if new_generation_config != self.generation_config:
+ warnings.warn(
+ "You have modified the pretrained model configuration to control generation. This is a"
+ " deprecated strategy to control generation and will be removed in a future version."
+ " Please use and modify the model generation configuration (see"
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
+ )
+ self.generation_config = new_generation_config
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+ generation_config.validate()
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ # 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models)
+ if inputs is not None:
+ if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating:
+ pass
+ elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating):
+ pass
+ else:
+ inputs = tf.cast(inputs, tf.int32)
+ if model_kwargs.get("attention_mask") is not None:
+ model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32)
+ if "decoder_input_ids" in model_kwargs:
+ if (
+ isinstance(model_kwargs["decoder_input_ids"], tf.Tensor)
+ and model_kwargs["decoder_input_ids"].dtype.is_floating
+ ):
+ pass
+ elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype(
+ model_kwargs["decoder_input_ids"].dtype, np.floating
+ ):
+ pass
+ else:
+ model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32)
+
+ # 3. Set generation parameters if not already defined
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
+ if model_kwargs.get("attention_mask") is None:
+ logger.warning(
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
+ )
+ eos_token_id = generation_config.eos_token_id
+ if isinstance(eos_token_id, list):
+ eos_token_id = eos_token_id[0]
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
+ generation_config.pad_token_id = eos_token_id
+
+ use_xla = not tf.executing_eagerly()
+ if use_xla and not self.supports_xla_generation:
+ raise ValueError(
+ "The selected model does not support graph mode or XLA generation (e.g. from tf.function())"
+ )
+
+ # 4. Define model inputs
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
+ inputs, generation_config.bos_token_id, model_kwargs
+ )
+ # input_ids now has to be defined and cannot be None anymore
+ batch_size = shape_list(inputs_tensor)[0]
+
+ # 5. Prepare other model kwargs
+ model_kwargs["output_attentions"] = generation_config.output_attentions
+ model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
+ model_kwargs["use_cache"] = generation_config.use_cache
+
+ accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys())
+ requires_attention_mask = "encoder_outputs" not in model_kwargs
+
+ if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
+ model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
+ inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
+ )
+
+ # decoder-only models should use left-padding for generation
+ if not self.config.is_encoder_decoder:
+ if generation_config.pad_token_id is not None and tf.math.reduce_any(
+ inputs_tensor[:, -1] == generation_config.pad_token_id
+ ):
+ logger.warning(
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
+ )
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
+ # if the model is an encoder-decoder, encoder_outputs are created and added to `model_kwargs`
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
+ inputs_tensor, model_kwargs, model_input_name
+ )
+
+ # 6. Prepare model inputs which will be used for auto-regressive generation
+ if self.config.is_encoder_decoder:
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
+ batch_size=batch_size,
+ model_input_name=model_input_name,
+ model_kwargs=model_kwargs,
+ decoder_start_token_id=generation_config.decoder_start_token_id,
+ bos_token_id=generation_config.bos_token_id,
+ )
+ else:
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
+
+ # 7. Prepare `max_length` depending on other stopping criteria.
+ input_ids_seq_length = shape_list(input_ids)[-1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
+ # 20 is the default max_length of the generation config
+ warnings.warn(
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
+ "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.",
+ UserWarning,
+ )
+ elif generation_config.max_new_tokens is not None:
+ if not has_default_max_length and generation_config.max_length is not None:
+ logger.warning(
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+ "Please refer to the documentation for more information. "
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
+ )
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
+
+ # If the input length is a tensor (i.e. dynamic length), skip length checks
+ if not isinstance(input_ids_seq_length, tf.Tensor):
+ if (
+ generation_config.min_length is not None
+ and generation_config.min_length > generation_config.max_length
+ ):
+ raise ValueError(
+ f"Infeasible length constraints: the minimum length ({generation_config.min_length}) is larger"
+ f" than the maximum length ({generation_config.max_length})"
+ )
+ if input_ids_seq_length >= generation_config.max_length:
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+ logger.warning(
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+ " increasing `max_new_tokens`."
+ )
+
+ # 8.
determine generation mode + is_contrastive_search_gen_mode = ( + generation_config.top_k is not None + and generation_config.top_k > 1 + and generation_config.do_sample is False + and generation_config.penalty_alpha is not None + and generation_config.penalty_alpha > 0 + ) + is_greedy_gen_mode = ( + not is_contrastive_search_gen_mode + and (generation_config.num_beams == 1) + and generation_config.do_sample is False + ) + is_beam_gen_mode = ( + not is_contrastive_search_gen_mode + and (generation_config.num_beams > 1) + and generation_config.do_sample is False + ) + is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True + is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True + + # 9. prepare distribution pre_processing samplers + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + logits_processor=logits_processor, + ) + + # 10. go into different generation modes + if is_greedy_gen_mode: + if generation_config.num_return_sequences > 1: + raise ValueError( + f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" + " greedy search." + ) + # 11. run greedy search + return self.greedy_search( + input_ids, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + logits_processor=logits_processor, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + elif is_contrastive_search_gen_mode: + if generation_config.num_return_sequences > 1: + raise ValueError( + f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" + " contrastive search." + ) + # 11. run contrastive search + return self.contrastive_search( + input_ids, + top_k=generation_config.top_k, + penalty_alpha=generation_config.penalty_alpha, + logits_processor=logits_processor, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + elif is_sample_gen_mode: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config=generation_config) + + # 12. expand input_ids with `num_return_sequences` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_return_sequences, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 13. run sample + return self.sample( + input_ids, + logits_processor=logits_processor, + logits_warper=logits_warper, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + seed=seed, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + + elif is_beam_gen_mode: + if generation_config.num_beams < generation_config.num_return_sequences: + raise ValueError( + "Beam search decoding cannot return more sequences than it has beams. 
Please set num_beams >="
+ f" num_return_sequences, got {generation_config.num_beams} and"
+ f" {generation_config.num_return_sequences} (respectively)"
+ )
+
+ # 11. broadcast inputs to the desired number of beams
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ expand_in_new_axis=True,
+ **model_kwargs,
+ )
+
+ # 12. run beam search
+ return self.beam_search(
+ input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ length_penalty=generation_config.length_penalty,
+ early_stopping=generation_config.early_stopping,
+ logits_processor=logits_processor,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ num_return_sequences=generation_config.num_return_sequences,
+ **model_kwargs,
+ )
+
+ elif is_beam_sample_gen_mode:
+ if generation_config.num_beams < generation_config.num_return_sequences:
+ raise ValueError(
+ "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
+ f" num_return_sequences, got {generation_config.num_beams} and"
+ f" {generation_config.num_return_sequences} (respectively)"
+ )
+
+ # 11. prepare logits warper
+ logits_warper = self._get_logits_warper(generation_config=generation_config)
+
+ # 12. broadcast inputs to the desired number of beams
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ expand_in_new_axis=True,
+ **model_kwargs,
+ )
+
+ # 13. run beam sample (beam search with sampling)
+ return self.beam_search(
+ input_ids,
+ do_sample=True,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ length_penalty=generation_config.length_penalty,
+ early_stopping=generation_config.early_stopping,
+ logits_processor=logits_processor,
+ logits_warper=logits_warper,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ num_return_sequences=generation_config.num_return_sequences,
+ **model_kwargs,
+ )
+
+ def _prepare_attention_mask_for_generation(
+ self,
+ inputs: tf.Tensor,
+ pad_token_id: Optional[int],
+ eos_token_id: Optional[int],
+ ) -> tf.Tensor:
+ is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64)
+ is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id)
+ is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id)
+
+ # Check if input is input_ids and padded -> only then is attention_mask defined
+ if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
+ return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32)
+ else:
+ return tf.ones(inputs.shape[:2], dtype=tf.int32)
+
+ def _prepare_encoder_decoder_kwargs_for_generation(
+ self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None
+ ) -> Dict[str, Any]:
+ # 1. get encoder and store encoder outputs
+ encoder = self.get_encoder()
+
+ # 2. prepare encoder args and encoder kwargs from model kwargs
+ irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
+ encoder_kwargs = {
+ argument: value
+ for argument, value in model_kwargs.items()
+ if not any(argument.startswith(p) for p in irrelevant_prefix)
+ }
+ encoder_signature = set(inspect.signature(encoder.call).parameters)
+ encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
+ if not encoder_accepts_wildcard:
+ encoder_kwargs = {
+ argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
+ }
+
+ # 3. vision models don't use `attention_mask`.
+ encoder_kwargs["return_dict"] = True
+ encoder_kwargs[model_input_name] = inputs_tensor
+ if model_input_name != self.main_input_name: # in Keras, the first input must always be passed
+ encoder_kwargs[self.main_input_name] = None
+ encoder_outputs = encoder(**encoder_kwargs)
+ model_kwargs["encoder_outputs"] = encoder_outputs
+
+ return model_kwargs
+
+ def _prepare_decoder_input_ids_for_generation(
+ self,
+ batch_size: int,
+ model_input_name: str,
+ model_kwargs: Dict[str, tf.Tensor],
+ decoder_start_token_id: int = None,
+ bos_token_id: int = None,
+ ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
+ """Prepares `decoder_input_ids` for generation with encoder-decoder models."""
+ # 1. Check whether the user has defined `decoder_input_ids` manually. To make input naming more flexible,
+ # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
+ elif "input_ids" in model_kwargs and model_input_name != "input_ids":
+ decoder_input_ids = model_kwargs.pop("input_ids")
+ else:
+ decoder_input_ids = None
+
+ # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
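+ # Illustrative example (hypothetical values): with decoder_start_token_id=2 and a user-provided
+ # decoder_input_ids of [[5, 6]], the logic below yields [[2, 5, 6]]; with no user input it yields
+ # [[2]] for every sequence in the batch.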
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id + + # no user input -> use decoder_start_token_id as decoder_input_ids + if decoder_input_ids is None: + decoder_input_ids = decoder_input_ids_start + # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust + # decoder_attention_mask if provided) + elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id): + decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1) + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + decoder_attention_mask = tf.concat( + (tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), + axis=-1, + ) + model_kwargs["decoder_attention_mask"] = decoder_attention_mask + + return decoder_input_ids, model_kwargs + + def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: + # retrieve decoder_start_token_id for encoder-decoder models + # fall back to bos_token_id if necessary + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id is not None + else self.generation_config.decoder_start_token_id + ) + bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id + + if decoder_start_token_id is not None: + return decoder_start_token_id + elif bos_token_id is not None: + return bos_token_id + raise ValueError( + "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." + ) + + @staticmethod + def _expand_inputs_for_generation( + expand_size: int = 1, + is_encoder_decoder: bool = False, + input_ids: Optional[tf.Tensor] = None, + expand_in_new_axis: bool = False, + **model_kwargs, + ) -> Tuple[tf.Tensor, Dict[str, Any]]: + """ + Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...], + depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with + `expand_in_new_axis=True` + """ + + def _expand_tensor(tensor: tf.Tensor): + if expand_in_new_axis: + shape = shape_list(tensor) + return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:])) + else: + return tf.repeat(tensor, expand_size, axis=0) + + def _expand_dict_for_generation(dict_to_expand): + for key in dict_to_expand: + if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor): + dict_to_expand[key] = _expand_tensor(dict_to_expand[key]) + return dict_to_expand + + if input_ids is not None: + input_ids = _expand_tensor(input_ids) + + model_kwargs = _expand_dict_for_generation(model_kwargs) + + if is_encoder_decoder: + if model_kwargs.get("encoder_outputs") is None: + raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") + model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) + + return input_ids, model_kwargs + + def _prepare_model_inputs( + self, + inputs: Optional[tf.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, tf.Tensor]] = None, + ) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]: + """ + This function extracts the model-specific `inputs` for generation. + """ + # 1. retrieve all kwargs that are non-None or non-model input related. 
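+ # Note (assumption for illustration): `main_input_name` is typically "input_ids" for text models, while
+ # e.g. speech encoder-decoder models may expose "input_features" as the encoder's main input, which is
+ # why the encoder's name is preferred below when it differs from the model's.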
+ # some encoder-decoder models have different names for model and encoder
+ if (
+ self.config.is_encoder_decoder
+ and hasattr(self, "encoder")
+ and hasattr(self.encoder, "main_input_name")
+ and self.encoder.main_input_name != self.main_input_name
+ ):
+ input_name = self.encoder.main_input_name
+ else:
+ input_name = self.main_input_name
+
+ model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
+
+ # 2. check whether model_input_name is passed as kwarg
+ # if yes and `inputs` is None use kwarg inputs
+ inputs_kwarg = model_kwargs.pop(input_name, None)
+ if inputs_kwarg is not None and inputs is not None:
+ raise ValueError(
+ f"`inputs`: {inputs} were passed alongside {input_name} which is not allowed. "
+ f"Make sure to either pass {inputs} or {input_name}=..."
+ )
+ elif inputs_kwarg is not None:
+ inputs = inputs_kwarg
+
+ # 3. In the presence of `inputs_embeds` for text models:
+ # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
+ # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
+ # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
+ # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
+ # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
+ if not self.config.is_encoder_decoder:
+ has_inputs_embeds_forwarding = "inputs_embeds" in set(
+ inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
+ )
+ if not has_inputs_embeds_forwarding:
+ raise ValueError(
+ f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
+ "doesn't have its forwarding implemented. See the GPT2 implementation for an example "
+ "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
+ )
+ # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
+ # the attention mask) can rely on the actual model input.
+ model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
+ inputs, bos_token_id, model_kwargs=model_kwargs
+ )
+ else:
+ if inputs is not None:
+ raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
+ inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
+
+ # 4.
if `inputs` is still None, try to create `input_ids` from BOS token + inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) + + return inputs, input_name, model_kwargs + + def _maybe_initialize_input_ids_for_generation( + self, + inputs: Optional[tf.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, tf.Tensor]] = None, + ) -> tf.Tensor: + """Initializes input ids for generation, if necessary.""" + if inputs is not None: + return inputs + + encoder_outputs = model_kwargs.get("encoder_outputs") + if self.config.is_encoder_decoder and encoder_outputs is not None: + # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding + shape = encoder_outputs.last_hidden_state.shape[:-1] + return tf.ones(shape, dtype=tf.int32) * -100 + + if bos_token_id is None: + raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") + + # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with + # soft-prompting or in multimodal implementations built on top of decoder-only language models. + batch_size = 1 + for value in model_kwargs.values(): + if isinstance(value, tf.Tensor): + batch_size = value.shape[0] + break + return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id + + @staticmethod + def _extract_past_from_model_output(outputs: ModelOutput): + past_key_values = None + if "past_key_values" in outputs: + past_key_values = outputs.past_key_values + elif "mems" in outputs: + past_key_values = outputs.mems + elif "past_buckets_states" in outputs: + past_key_values = outputs.past_buckets_states + return past_key_values + + def _update_model_kwargs_for_generation( + self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False + ) -> Dict[str, Any]: + # update past_key_values + model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs) + + # update attention mask + if not is_encoder_decoder: + if "attention_mask" in model_kwargs: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = tf.concat( + [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 + ) + + return model_kwargs + + def _update_model_kwargs_for_xla_generation( + self, + model_outputs: ModelOutput, + model_kwargs: Dict[str, Any], + cur_len: int, + max_length: int, + batch_size: int, + is_encoder_decoder: bool = False, + batch_axis: int = 0, + ): + def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder): + """initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" + if is_encoder_decoder: + # One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor, + # 1s for the actual input_ids + decoder_attention_mask = tf.concat( + [ + tf.ones((batch_size, 1), dtype=tf.int32), + tf.zeros((batch_size, num_padding_values), dtype=tf.int32), + tf.ones((batch_size, 1), dtype=tf.int32), + ], + axis=1, + ) + mask = {"decoder_attention_mask": decoder_attention_mask} + else: + attention_mask = model_kwargs.pop("attention_mask") + # 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids + attention_mask = tf.concat( + [ + attention_mask, + tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype), + tf.ones((batch_size, 1), dtype=attention_mask.dtype), + ], + axis=1, + ) + mask = 
{"attention_mask": attention_mask} + return mask + + def _update_attention(model_kwargs, new_past_index, is_encoder_decoder): + """updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" + update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index + if is_encoder_decoder: + decoder_attention_mask = model_kwargs.pop("decoder_attention_mask") + decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype) + decoder_attention_mask = dynamic_update_slice( + decoder_attention_mask, decoder_attention_mask_update_slice, update_start + ) + mask = {"decoder_attention_mask": decoder_attention_mask} + else: + attention_mask = model_kwargs.pop("attention_mask") + attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype) + attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start) + mask = {"attention_mask": attention_mask} + return mask + + def _initialize_past(past_key_values, num_padding_values, batch_axis): + """initialize past_key_values with zeros -- the structure depends on `batch_axis`""" + if batch_axis == 0: + padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32) + new_past = () + for past_layer in past_key_values: + new_past_layer = list(past_layer) + for i in range(len(new_past_layer[:2])): + new_past_layer[i] = tf.pad(past_layer[i], padding_values) + new_past += (tuple(new_past_layer),) + else: + padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2)) + new_past = list(past_key_values) + for i in range(len(past_key_values)): + new_past[i] = tf.pad(past_key_values[i], padding_values) + return new_past + + def _update_past(past_key_values, new_past_index, batch_axis): + if batch_axis == 0: + slice_start_base = tf.constant([0, 0, 1, 0]) + new_past = () + for past_layer in past_key_values: + new_past_layer = list(past_layer) + for i in range(len(new_past_layer[:2])): + update_slice = past_layer[i][:, :, -1:] + # Write the last slice to the first open location in the padded past_key_values array + # and then truncate the last slice off the array + new_past_layer[i] = dynamic_update_slice( + past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index + ) + new_past += (tuple(new_past_layer),) + else: + slice_start_base = tf.constant([0, 0, 0, 1, 0]) + new_past = [None for _ in range(len(past_key_values))] + for i in range(len(past_key_values)): + update_slice = past_key_values[i][:, :, :, -1:] + # Write the last slice to the first open location in the padded past_key_values array + # and then truncate the last slice off the array + new_past[i] = dynamic_update_slice( + past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index + ) + return new_past + + past_key_values = self._extract_past_from_model_output(model_outputs) + if past_key_values is None: + raise ValueError( + "No known `past_key_values variable` found in model outputs (model outputs keys:" + f" {list(model_outputs.keys())})" + ) + is_past_initialized = model_kwargs.pop("past_key_values", None) is not None + + if not is_past_initialized: + # The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds information relative to + # previous autoregressive generation steps (step 0 has no past_key_values, step 1 has 1 past_key_values value, ..., the last step + # has `max_length - 1` past_key_values values). 
+ num_padding_values = max_length - cur_len - 1
+ mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder)
+ new_past = _initialize_past(past_key_values, num_padding_values, batch_axis)
+ else:
+ # The new index of past_key_values to be filled corresponds to the current length of the sequence, with two
+ # subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above)
+ # and -1 again because in an array the index is the length of the array minus 1.
+ new_past_index = cur_len - 2
+ mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder)
+ new_past = _update_past(past_key_values, new_past_index, batch_axis)
+
+ # sets the updated variables (mask and past_key_values)
+ model_kwargs.update(mask)
+ model_kwargs["past_key_values"] = tuple(new_past)
+
+ return model_kwargs
+
+ def _get_logits_warper(
+ self,
+ generation_config: GenerationConfig,
+ ) -> TFLogitsProcessorList:
+ """
+ This method returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsWarper`]
+ instances used for multinomial sampling.
+ """
+
+ # instantiate warpers list
+ warpers = TFLogitsProcessorList()
+
+ # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a
+ # better score (i.e. keep len(generation_config.eos_token_id) + 1)
+ if generation_config.num_beams > 1:
+ if isinstance(generation_config.eos_token_id, list):
+ min_tokens_to_keep = len(generation_config.eos_token_id) + 1
+ else:
+ min_tokens_to_keep = 2
+ else:
+ min_tokens_to_keep = 1
+
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
+ warpers.append(TFTemperatureLogitsWarper(generation_config.temperature))
+ if generation_config.top_k is not None and generation_config.top_k != 0:
+ warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
+ warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
+ return warpers
+
+ def _get_logits_processor(
+ self,
+ generation_config: GenerationConfig,
+ input_ids_seq_length: int,
+ logits_processor: Optional[TFLogitsProcessorList],
+ ) -> TFLogitsProcessorList:
+ """
+ This method returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`]
+ instances used to modify the scores of the language model head.
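+
+ For example (illustrative), calling `generate` with `repetition_penalty=1.2` and `no_repeat_ngram_size=2`
+ yields a list containing a [`TFRepetitionPenaltyLogitsProcessor`] followed by a
+ [`TFNoRepeatNGramLogitsProcessor`], applied in that order at each generation step.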
+ """ + processors = TFLogitsProcessorList() + + # instantiate processors list + if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: + processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) + if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: + processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) + if generation_config.bad_words_ids is not None: + processors.append( + TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) + ) + if ( + generation_config.min_length is not None + and generation_config.eos_token_id is not None + and generation_config.min_length > 0 + ): + processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) + if generation_config.forced_bos_token_id is not None: + processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) + if generation_config.forced_eos_token_id is not None: + processors.append( + TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) + ) + if generation_config.suppress_tokens is not None: + processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) + if generation_config.begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = ( + begin_index + if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) + else begin_index + 1 + ) + if generation_config.forced_decoder_ids is not None: + begin_index += generation_config.forced_decoder_ids[-1][ + 0 + ] # generation starts after the last token that is forced + processors.append( + TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) + ) + if generation_config.forced_decoder_ids is not None: + processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids)) + + processors = self._merge_criteria_processor_list(processors, logits_processor) + return processors + + def _merge_criteria_processor_list( + self, + default_list: TFLogitsProcessorList, + custom_list: TFLogitsProcessorList, + ) -> TFLogitsProcessorList: + if len(custom_list) == 0: + return default_list + for default in default_list: + for custom in custom_list: + if type(custom) is type(default): + object_type = "logits processor" + raise ValueError( + f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" + f" `generate`, but it has already been created with the values {default}. {default} has been" + " created by passing the corresponding arguments to generate or by the model's config default" + f" values. If you just want to change the default values of {object_type} consider passing" + f" them as arguments to `generate` instead of using a custom {object_type}." 
+ ) + default_list.extend(custom_list) + return default_list + + def greedy_search( + self, + input_ids: tf.Tensor, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + logits_processor: Optional[TFLogitsProcessorList] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + **model_kwargs, + ) -> Union[TFGreedySearchOutput, tf.Tensor]: + r""" + Generates sequences for models with a language modeling head using greedy decoding. + + Parameters: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + max_length (`int`, *optional*, defaults to 20): + The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `call` function of the model. If + model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or + `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a + [`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... TFAutoModelForCausalLM, + ... TFLogitsProcessorList, + ... TFMinLengthLogitsProcessor, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2") + + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + + >>> input_prompt = "Today is a beautiful day, and" + >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids + + >>> # instantiate logits processors + >>> logits_processor = TFLogitsProcessorList( + ... [ + ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... 
)
+
+ >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"]
+ ```"""
+
+ # 1. init greedy_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+ use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find programmatic way to detect cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+ # some models, like XLNet, need more than the last token in the presence of past_key_values
+ needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, cur_len = shape_list(input_ids)
+
+ # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
+ input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
+ generated = tf.concat([input_ids, input_ids_padding], axis=-1)
+ finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
+
+ # 4.
define "xla-compile-able" stop-condition and auto-regressive function + # define condition fn + def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs): + """state termination condition fn.""" + return ~tf.reduce_all(finished_sequences) + + # define condition fn + def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs): + """state update fn.""" + if model_kwargs.get("past_key_values") is None or needs_full_input: + input_ids = generated[:, :cur_len] + else: + input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) + model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) + # forward pass to get next token logits + model_outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + next_token_logits = model_outputs.logits[:, -1] + + # pre-process distribution + next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) + + # Store scores, attentions and hidden_states when required + if not use_xla and return_dict_in_generate: + if output_scores: + scores.append(next_tokens_scores) + if output_attentions and self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.decoder_attentions) + elif output_attentions and not self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.attentions) + if self.config.is_encoder_decoder: + cross_attentions.append(model_outputs.cross_attentions) + + if output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.decoder_hidden_states) + elif output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.hidden_states) + + # argmax + next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32) + + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + cur_len += 1 + + # update model_kwargs + if use_xla: + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=model_outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=batch_size, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + # if we don't cache past_key_values key values we need the whole input + if model_kwargs.get("past_key_values", None) is None: + # let's throw out `past_key_values` since we don't want `None` tensors + model_kwargs.pop("past_key_values", None) + + return generated, finished_sequences, cur_len, model_kwargs + + # 5. 
run generation + # 1st generation step has to be run before to initialize `past_key_values` + generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn( + generated, finished_sequences, cur_len, model_kwargs + ) + + # 2-to-n generation steps can then be run in autoregressive fashion + # only in case 1st generation step does NOT yield EOS token though + maximum_iterations = max_length - cur_len + generated, _, cur_len, _ = tf.while_loop( + greedy_search_cond_fn, + greedy_search_body_fn, + (generated, finished_sequences, cur_len, model_kwargs), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + if not use_xla: + # cut for backward compatibility + generated = generated[:, :cur_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights + # and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + scores = tuple(scores) if scores is not None else None + decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None + cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None + decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None + + return TFGreedySearchEncoderDecoderOutput( + sequences=generated, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + return TFGreedySearchDecoderOnlyOutput( + sequences=generated, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return generated + + def sample( + self, + input_ids: tf.Tensor, + logits_processor: Optional[TFLogitsProcessorList] = None, + logits_warper: Optional[TFLogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + seed: Optional[Tuple[int, int]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + **model_kwargs, + ) -> Union[TFSampleOutput, tf.Tensor]: + r""" + Generates sequences for models with a language modeling head using multinomial sampling. + + Parameters: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] + used to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. 
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ seed (`List[int]`, *optional*):
+ Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
+ `seed` argument from stateless functions in `tf.random`.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
+ encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A
+ `tf.Tensor` containing the generated tokens (default behaviour) or a
+ [`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... TFAutoModelForCausalLM,
+ ... TFLogitsProcessorList,
+ ... TFMinLengthLogitsProcessor,
+ ... TFTopKLogitsWarper,
+ ... TFTemperatureLogitsWarper,
+ ... )
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
+
+ >>> input_prompt = "Today is a beautiful day, and"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = TFLogitsProcessorList(
+ ... [
+ ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> # instantiate logits warpers
+ >>> logits_warper = TFLogitsProcessorList(
+ ... [
+ ... TFTopKLogitsWarper(50),
+ ... TFTemperatureLogitsWarper(0.7),
+ ... ]
+ ... )
+
+ >>> tf.random.set_seed(0)
+ >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Today is a beautiful day, and I love my country. But when I look at Donald Trump,']
+ ```"""
+
+ # 1.
init sample values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+ use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find programmatic way to detect cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+ # some models, like XLNet, need more than the last token in the presence of past_key_values
+ needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, cur_len = shape_list(input_ids)
+
+ # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences`
+ input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
+ generated = tf.concat([input_ids, input_ids_padding], axis=-1)
+ finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
+
+ # 4.
define "xla-compile-able" stop-condition and auto-regressive function + def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs): + return ~tf.reduce_all(finished_sequences) + + def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs): + if model_kwargs.get("past_key_values") is None or needs_full_input: + input_ids = generated[:, :cur_len] + else: + input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) + model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) + # forward pass to get next token logits + model_outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + next_token_logits = model_outputs.logits[:, -1] + + # pre-process distribution + next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) + next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len) + + # Store scores, attentions and hidden_states when required + if not use_xla and return_dict_in_generate: + if output_scores: + scores.append(next_tokens_scores) + if output_attentions and self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.decoder_attentions) + elif output_attentions and not self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.attentions) + if self.config.is_encoder_decoder: + cross_attentions.append(model_outputs.cross_attentions) + + if output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.decoder_hidden_states) + elif output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.hidden_states) + + # sample + if seed is not None: + sample_seed = seed + else: + sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32) + next_tokens = tf.squeeze( + tf.random.stateless_categorical( + logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32 + ), + axis=1, + ) + + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + cur_len += 1 + + # update model_kwargs + if use_xla: + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=model_outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=batch_size, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + # if we don't cache past_key_values key values we need the whole input + if model_kwargs.get("past_key_values", None) is None: + # let's throw out `past_key_values` since we don't want `None` tensors + 
model_kwargs.pop("past_key_values", None) + + return generated, finished_sequences, cur_len, model_kwargs + + # 5. run generation + # 1st generation step has to be run before to initialize `past_key_values` + generated, finished_sequences, cur_len, model_kwargs = sample_body_fn( + generated, finished_sequences, cur_len, model_kwargs + ) + + # 2-to-n generation steps can then be run in autoregressive fashion + # only in case 1st generation step does NOT yield EOS token though + maximum_iterations = max_length - cur_len + generated, _, cur_len, _ = tf.while_loop( + sample_cond_fn, + sample_body_fn, + (generated, finished_sequences, cur_len, model_kwargs), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + if not use_xla: + # cut for backward compatibility + generated = generated[:, :cur_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights + # and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + scores = tuple(scores) if scores is not None else None + decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None + cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None + decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None + + return TFSampleEncoderDecoderOutput( + sequences=generated, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + return TFSampleDecoderOnlyOutput( + sequences=generated, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return generated + + @staticmethod + def _gather_beams(nested, beam_indices, batch_axis=0): + """Gathers the beam slices indexed by beam_indices into new beam array.""" + + def gather_fn(tensor): + if batch_axis > 0: + # pushes all dimentions before the batch to the end, so we get (batch, beam_id, ...) 
+ perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
+ tensor = tf.transpose(tensor, perm=perm)
+
+ gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
+ if batch_axis > 0:
+ # transposes back to the original dimensions
+ perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
+ perm = tf.math.invert_permutation(perm)
+ gathered_tensor = tf.transpose(gathered_tensor, perm=perm)
+
+ return gathered_tensor
+
+ return tf.nest.map_structure(gather_fn, nested)
+
+ def beam_search(
+ self,
+ input_ids: tf.Tensor,
+ do_sample: bool = False,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ length_penalty: Optional[float] = None,
+ early_stopping: Optional[Union[bool, str]] = None,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ logits_warper: Optional[TFLogitsProcessorList] = None,
+ num_return_sequences: Optional[int] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
+ r"""
+ Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses
+ a greedy approach, otherwise does multinomial sampling without replacement.
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ do_sample (`bool`, *optional*, defaults to `False`):
+ Whether or not to use sampling; use greedy decoding otherwise.
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent
+ to the sequence length, which in turn is used to divide the score of the sequence. Since the score is
+ the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences,
+ while `length_penalty` < 0.0 encourages shorter sequences.
+ early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following
+ values: `True`, where the generation stops as soon as there are `num_beams` complete candidates;
+ `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better
+ candidates; `"never"`, where the beam search procedure only stops when there cannot be better
+ candidates (canonical beam search algorithm).
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`].
List of instances of class derived from [`TFLogitsWarper`] + used to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + num_return_sequences(`int`, *optional*, defaults to 1): + The number of independently computed returned sequences for each element in the batch. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. + model_kwargs: + Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an + encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or + `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a + [`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... TFAutoModelForSeq2SeqLM, + ... TFLogitsProcessorList, + ... TFMinLengthLogitsProcessor, + ... ) + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") + >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids + + >>> # lets run beam search using 3 beams + >>> num_beams = 3 + >>> # define decoder start token ids + >>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32) + >>> input_ids = input_ids * model.generation_config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True) + >>> encoder_outputs.last_hidden_state = tf.repeat( + ... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1 + ... ) + >>> model_kwargs = {"encoder_outputs": encoder_outputs} + + >>> # instantiate logits processors + >>> logits_processor = TFLogitsProcessorList( + ... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)] + ... ) + + >>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt bist du?'] + ```""" + + def flatten_beam_dim(tensor, batch_axis=0): + """Flattens the first two dimensions of a non-scalar array.""" + shape = shape_list(tensor) + return tf.reshape( + tensor, + shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :], + ) + + def unflatten_beam_dim(tensor, num_beams, batch_axis=0): + """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" + shape = shape_list(tensor) + return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :]) + + # 1. 
init beam_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ num_return_sequences = (
+ num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
+ )
+
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
+ early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
+
+ use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find programmatic way to detect cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+ # some models, like XLNet, need more than the last token in the presence of past_key_values
+ needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ all_scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, num_beams, cur_len = shape_list(input_ids)
+ # store the prompt length of decoder
+ decoder_prompt_len = cur_len
+
+ # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id`
+ input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * (
+ pad_token_id or 0
+ )
+ running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1)
+ sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0)
+
+ # per batch, beam-item state bit indicating if sentence has finished.
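+ # For orientation (shapes implied by the code around this point; illustrative only):
+ #   running_sequences / sequences -> (batch_size, num_beams, max_length)
+ #   running_scores / scores       -> (batch_size, num_beams)
+ #   is_sent_finished              -> (batch_size, num_beams)
+ #   *_beam_indices                -> (batch_size, num_beams, max_length - decoder_prompt_len)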
+ is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool) + + # per batch, beam-item score, logprobs + running_scores = tf.tile( + tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1] + ) + scores = tf.ones((batch_size, num_beams)) * -1.0e9 + + # per batch beam indices + running_beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1 + beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1 + + # flatten beam dim + if "encoder_outputs" in model_kwargs: + model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( + model_kwargs["encoder_outputs"]["last_hidden_state"] + ) + if "attention_mask" in model_kwargs: + model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"]) + + # 4. define "xla-compile-able" stop-condition and auto-regressive function + # define stop-condition and auto-regressive function + def beam_search_cond_fn( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ): + """ + Beam Search termination condition function -- halts the generation loop if any of these conditions becomes + False + """ + # 1. is less than max length? + not_max_length_yet = cur_len < max_length + + # 2. can the new beams still improve? + # early_stopping == False -> apply heuristic = always get the best score from `cur_len - decoder_prompt_len`. See the discussion + # below for more details. + # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 + # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of + # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. + if early_stopping == "never" and length_penalty > 0.0: + best_running_score = running_scores[:, :1] / ((max_length - decoder_prompt_len) ** length_penalty) + else: + best_running_score = running_scores[:, :1] / ( + tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty + ) + worst_finished_score = tf.where( + is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9 + ) + improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score) + + # 3. is there still a beam that has not finished? + still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True)) + + return not_max_length_yet & still_open_beam & improvement_still_possible + + def beam_search_body_fn( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ): + """ + Beam Search iterative update function -- each iteration adds a new token and updates the best sequences + seen so far + """ + # 1. Forward current tokens + if model_kwargs.get("past_key_values") is None or needs_full_input: + input_ids = running_sequences[:, :, :cur_len] + else: + input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1) + model_inputs = self.prepare_inputs_for_generation( + flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs + ) + model_outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams) + + # 2. 
Compute log probs
+ # get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and
+ # add new logprobs to existing running logprobs scores.
+ log_probs = tf.nn.log_softmax(logits)
+ log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
+ log_probs = unflatten_beam_dim(log_probs, num_beams)
+ if do_sample:
+ log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
+ log_probs = unflatten_beam_dim(log_probs, num_beams)
+ log_probs_processed = log_probs
+ log_probs = log_probs + tf.expand_dims(running_scores, axis=2)
+ vocab_size = log_probs.shape[2]
+ log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size))
+
+ # Store scores, attentions and hidden_states when required
+ if not use_xla and return_dict_in_generate:
+ if output_scores:
+ all_scores.append(
+ logits_warper(
+ flatten_beam_dim(running_sequences),
+ flatten_beam_dim(log_probs_processed),
+ cur_len,
+ )
+ )
+ if output_attentions and self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.decoder_attentions)
+ elif output_attentions and not self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.attentions)
+ if self.config.is_encoder_decoder:
+ cross_attentions.append(model_outputs.cross_attentions)
+
+ if output_hidden_states and self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.decoder_hidden_states)
+ elif output_hidden_states and not self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.hidden_states)
+
+ # 3. Retrieve top-K
+ # Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k
+ # candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the
+ # best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live
+ # beam search.
+ # Gather the top 2*K scores from _all_ beams.
+ # Gather 2*k top beams.
+ # Recover the beam index by floor division.
+ # Recover token id by modulo division and expand id array for broadcasting.
+ # Update sequences for the 2*K top-k new sequences.
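+ # Worked example (hypothetical numbers, not part of the upstream code): with
+ # num_beams=3 and vocab_size=50257, each batch item ranks 3 * 50257 flattened
+ # candidates and keeps the top 2 * 3 = 6; for a flat index of 100514,
+ # 100514 // 50257 = 2 recovers the source beam and 100514 % 50257 = 0 the token id.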
+ beams_to_keep = 2 * num_beams + if do_sample: + topk_indices = sample_without_replacement(log_probs, beams_to_keep) + topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1) + else: + topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep) + topk_current_beam_indices = topk_indices // vocab_size + topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices) + topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices) + topk_ids = topk_indices % vocab_size + + # writes the new token + indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep]) + indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size]) + update_indices = tf.stack( + [indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1 + ) + topk_sequences = tf.tensor_scatter_nd_update( + tensor=topk_running_sequences, + indices=update_indices, + updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]), + ) + + # we want to store the beam indices with batch information -> real beam index = beam index % num beams + batch_modified_indices = topk_current_beam_indices + tf.broadcast_to( + tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape + ) + update_indices = tf.stack( + [ + indices_batch, + indices_beam, + tf.broadcast_to(cur_len - decoder_prompt_len, [batch_size * beams_to_keep]), + ], + axis=-1, + ) + topk_beam_indices = tf.tensor_scatter_nd_update( + tensor=topk_running_beam_indices, + indices=update_indices, + updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]), + ) + + # 4. Check which sequences have ended + # Update current sequences: Did the top `num_beams` sequences reach an end marker? + # To prevent these just finished sequences from being added to the current sequences + # set of active beam search sequences, set their log probs to a very large negative value. + if eos_token_id is None: + eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool) + else: + eos_in_next_token = tf.math.reduce_any( + tf.equal( + tf.broadcast_to( + topk_sequences[:, :, cur_len], + [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape, + ), + tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1), + ), + axis=0, + ) + did_topk_just_finished = eos_in_next_token & tf.broadcast_to( + tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0), + shape_list(eos_in_next_token), + ) + + # non-top `num_beams` eos tokens can't be used to finish a beam, but the others can't be used in the next + # running sentences either + running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9 + + # 5. Get running sequences scores for next + # Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams + # (from top 2*k beams). + next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1] + next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams( + [topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices + ) + + # 6. 
Process topk logits + # Further process log probs: + # - add length penalty + # - make sure no scores can be added anymore if beam is full + # - make sure still running sequences cannot be chosen as finalized beam + topk_log_probs = topk_log_probs / ( + tf.cast(cur_len + 1 - decoder_prompt_len, dtype=tf.float32) ** length_penalty + ) + beams_in_batch_are_full = tf.broadcast_to( + tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished) + ) & (early_stopping is True) + add_penalty = ~did_topk_just_finished | beams_in_batch_are_full + topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9 + + # 7. Get scores, sequences, is sentence finished for next. + # Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores + # to existing finished scores and select the best from the new set of beams + merged_sequences = tf.concat([sequences, topk_sequences], axis=1) + merged_scores = tf.concat([scores, topk_log_probs], axis=1) + merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1) + merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1) + topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1] + next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams( + [merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices + ) + + # 8. Prepare data for the next iteration + # Determine the top k beam indices from the original set of all beams. With these, gather the top k + # beam-associated caches. + cur_len = cur_len + 1 + if "past_key_values" in model_outputs: + cache = tf.nest.map_structure( + lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis), + model_outputs.past_key_values, + ) + next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices) + next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis) + model_outputs["past_key_values"] = tf.nest.map_structure( + lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache + ) + + if use_xla: + next_model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=model_outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=(batch_size * num_beams), + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + next_model_kwargs = self._update_model_kwargs_for_generation( + model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # if we don't cache past_key_values key values we need the whole input + if model_kwargs.get("past_key_values", None) is None: + # let's throw out `past_key_values` since we don't want `None` tensors + model_kwargs.pop("past_key_values", None) + + return ( + cur_len, + next_running_sequences, + next_running_scores, + next_running_beam_indices, + next_sequences, + next_scores, + next_beam_indices, + next_is_sent_finished, + decoder_prompt_len, + next_model_kwargs, + ) + + # 5. 
run generation + # 1st generation step has to be run before to initialize `past_key_values` (if active) + ( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ) = beam_search_body_fn( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ) + + # 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does + # NOT yield EOS token though) + maximum_iterations = max_length - cur_len + ( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + _, + ) = tf.while_loop( + beam_search_cond_fn, + beam_search_body_fn, + ( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + # Account for the edge-case where there are no finished sequences for a particular batch item. If so, return + # running sequences for that batch item. + none_finished = tf.math.reduce_any(is_sent_finished, axis=1) + sequences = tf.where(none_finished[:, None, None], sequences, running_sequences) + beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices) + + # Apply the length penalty so that running scores match the finalized scores if they are used + running_scores = running_scores / (tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty) + scores = tf.where(none_finished[:, None], scores, running_scores) + + # Take best beams for each batch (the score is sorted in descending order) + sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) + scores = flatten_beam_dim(scores[:, :num_return_sequences]) + beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :]) + + if not use_xla: + # Cut for backward compatibility + sequences = sequences[:, :cur_len] + beam_indices = beam_indices[:, : cur_len - decoder_prompt_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput + return output_cls( + sequences=sequences, + sequences_scores=scores, + scores=all_scores, + beam_indices=beam_indices, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput + return output_cls( + sequences=sequences, + sequences_scores=scores, + scores=all_scores, + beam_indices=beam_indices, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return sequences + + def contrastive_search( + self, + input_ids: tf.Tensor, + top_k: Optional[int] = 1, + penalty_alpha: Optional[float] = 0, + 
logits_processor: Optional[TFLogitsProcessorList] = None,
+ logits_warper: Optional[TFLogitsProcessorList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[TFContrastiveSearchOutput, tf.Tensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
+ be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ top_k (`int`, *optional*, defaults to 1):
+ The size of the candidate set that is used to re-rank for contrastive search.
+ penalty_alpha (`float`, *optional*, defaults to 0):
+ The degeneration penalty for contrastive search; activate when it is larger than 0.
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
+ used to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
+ model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+ Return:
+ [`~generation.TFContrastiveSearchDecoderOnlyOutput`],
+ [`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the
+ generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if
+ `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a
+ [`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
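+
+ Note that contrastive search always uses the model cache (`use_cache` is forced to `True`);
+ models whose cache does not follow the standard `past_key_values` layout raise an error at
+ runtime (see the checks in the method body).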
+ Examples:
+ ```python
+ >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m")
+ >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> input_prompt = "DeepMind Company is"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="tf")
+ >>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64)
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it']
+ ```"""
+
+ def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0):
+ """Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors."""
+
+ def gather_fn(tensor):
+ gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis)
+ return gathered_tensor
+
+ return tf.nest.map_structure(gather_fn, nested)
+
+ # 1. init contrastive_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+ use_cache = True  # In contrastive search, we always use cache
+ model_kwargs.pop("use_cache", None)
+
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find programmatic way to detect cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3.
+        batch_size, cur_len = shape_list(input_ids)
+
+        # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
+        input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
+        generated = tf.concat([input_ids, input_ids_padding], axis=-1)
+        finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
+
+        # 4. define "xla-compilable" stop-condition and auto-regressive function
+        # define condition fn
+        def contrastive_search_cond_fn(
+            generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
+        ):
+            """state termination condition fn."""
+            return ~tf.reduce_all(finished_sequences)
+
+        # define body fn
+        def contrastive_search_body_fn(
+            generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
+        ):
+            """state update fn."""
+
+            # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values;
+            # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
+            if model_kwargs.get("past_key_values") is None:
+                # prepare inputs
+                model_inputs = self.prepare_inputs_for_generation(
+                    generated[:, :cur_len], use_cache=use_cache, **model_kwargs
+                )
+
+                # encode the given prefix and prepare model inputs; encoder-decoder models process the prefix and save
+                # the `encoder_outputs`
+                outputs = self(
+                    **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
+                )
+
+                # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
+                # previous tokens)
+                if self.config.is_encoder_decoder:
+                    last_hidden_states = outputs.decoder_hidden_states[-1]
+                else:
+                    last_hidden_states = outputs.hidden_states[-1]
+
+                # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
+                # iterations (with fixed shapes)
+                if use_xla:
+                    last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]])
+
+                # next logit for contrastive search to select top-k candidate tokens
+                logit_for_next_step = outputs.logits[:, -1, :]
+
+                if use_xla:
+                    model_kwargs = self._update_model_kwargs_for_xla_generation(
+                        model_outputs=outputs,
+                        model_kwargs=model_kwargs,
+                        cur_len=cur_len,
+                        max_length=max_length,
+                        batch_size=batch_size,
+                        is_encoder_decoder=self.config.is_encoder_decoder,
+                        batch_axis=cache_batch_axis,
+                    )
+                else:
+                    model_kwargs = self._update_model_kwargs_for_generation(
+                        outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+                    )
+
+                # Expands model inputs top_k times, for batched forward passes (akin to beam search).
+                _, model_kwargs = self._expand_inputs_for_generation(
+                    expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
+                )
+
+                past_key_values = model_kwargs.get("past_key_values")
+                if past_key_values is None:
+                    raise ValueError(
+                        f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
+                        "for contrastive search."
+                    )
+                elif (
+                    not isinstance(past_key_values[0], (tuple, tf.Tensor))
+                    or past_key_values[0][0].shape[0] != batch_size
+                ):
+                    raise ValueError(
+                        f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
+                        "used for contrastive search without further modifications."
+                    )
+            else:
+                logit_for_next_step = next_step_cached_variables["logit_for_next_step"]
+                last_hidden_states = next_step_cached_variables["last_hidden_states"]
+                outputs = next_step_cached_variables["outputs"]
+
+            # contrastive_search main logic start:
+            # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
+            # degeneration penalty
+
+            logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len)
+            logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len)
+            next_probs = stable_softmax(logit_for_next_step, axis=-1)
+            top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k)
+
+            # Store scores, attentions and hidden_states when required
+            if not use_xla and return_dict_in_generate:
+                if output_scores:
+                    scores.append(logit_for_next_step)
+                if output_attentions and self.config.is_encoder_decoder:
+                    decoder_attentions.append(outputs.decoder_attentions)
+                elif output_attentions and not self.config.is_encoder_decoder:
+                    decoder_attentions.append(outputs.attentions)
+                if output_attentions and self.config.is_encoder_decoder:
+                    cross_attentions.append(outputs.cross_attentions)
+
+                if output_hidden_states and self.config.is_encoder_decoder:
+                    decoder_hidden_states.append(outputs.decoder_hidden_states)
+                elif output_hidden_states and not self.config.is_encoder_decoder:
+                    decoder_hidden_states.append(outputs.hidden_states)
+
+            # Replicates the new past_key_values to match the `top_k` candidates
+            model_kwargs["past_key_values"] = tf.nest.map_structure(
+                lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"]
+            )
+
+            # compute the candidate tokens by the language model and collect their hidden_states
+            next_model_inputs = self.prepare_inputs_for_generation(
+                tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs
+            )
+            outputs = self(
+                **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
+            )
+            next_past_key_values = self._extract_past_from_model_output(outputs)
+
+            logits = outputs.logits[:, -1, :]
+            # name is different for encoder-decoder and decoder-only models
+            if self.config.is_encoder_decoder:
+                next_hidden = outputs.decoder_hidden_states[-1]
+                full_hidden_states = outputs.decoder_hidden_states
+            else:
+                next_hidden = outputs.hidden_states[-1]
+                full_hidden_states = outputs.hidden_states
+            context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0)
+
+            # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
+            # model confidence
+            selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
+
+            # converts indices along the top_k dimension to indices into the stacked top_k * batch_size dimension, for
+            # indexing without a need to reshape tensors that have these two dimensions stacked
+            selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k
+
+            # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
+            # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
+            # (model confidence minus degeneration penalty); (6) decoder hidden_states
+            next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1)
+            next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked)
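+            # Editor's note (illustrative sketch, not part of the original patch): with batch_size=2
+            # and top_k=3, the candidate rows are stacked as [b0k0, b0k1, b0k2, b1k0, b1k1, b1k2]. If
+            # the re-ranker picks candidate 2 for row 0 and candidate 0 for row 1, then
+            #
+            #     selected_idx         == [2, 0]
+            #     selected_idx_stacked == [2, 0] + [0, 1] * 3 == [2, 3]
+            #
+            # i.e. the flat positions of the winning candidates in the stacked batch, which is what
+            # `gather_best_candidate` consumes here.
+
+            # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
+            # iterations (with fixed shapes)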
+ if use_xla: + last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0]) + else: + last_hidden_states = tf.concat([last_hidden_states, next_hidden], axis=1) + + next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked) + next_past_key_values = gather_best_candidate( + next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis + ) + logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked) + + # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration + if self.config.is_encoder_decoder: + next_step_cross_attentions = () + next_step_decoder_attentions = () + if output_attentions: + next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked) + next_step_decoder_attentions = gather_best_candidate( + outputs.decoder_attentions, selected_idx_stacked + ) + outputs = TFSeq2SeqLMOutput( + past_key_values=next_past_key_values, + decoder_hidden_states=next_decoder_hidden_states, + decoder_attentions=next_step_decoder_attentions or None, + cross_attentions=next_step_cross_attentions or None, + ) + else: + next_step_attentions = () + if output_attentions: + next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked) + outputs = TFCausalLMOutputWithPast( + past_key_values=next_past_key_values, + hidden_states=next_decoder_hidden_states, + attentions=next_step_attentions or None, + ) + # contrastive_search main logic end + + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + cur_len += 1 + + if use_xla: + # NOTE: 1) relative to other generation strategies, contrastive search is always running forward + # passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from + # [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k` + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=outputs, + model_kwargs=model_kwargs, + cur_len=cur_len + 1, + max_length=max_length, + batch_size=batch_size * top_k, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + next_step_cached_variables = { + "logit_for_next_step": logit_for_next_step, + "last_hidden_states": last_hidden_states, + "outputs": outputs, + } + return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables + + # 5. 
run generation
+        # 1st generation step has to be run before to initialize `past_key_values`
+        generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn(
+            generated, finished_sequences, cur_len, model_kwargs, None
+        )
+
+        # 2-to-n generation steps can then be run in autoregressive fashion
+        # only in case 1st generation step does NOT yield EOS token though
+        maximum_iterations = max_length - cur_len
+        generated, _, cur_len, _, _ = tf.while_loop(
+            contrastive_search_cond_fn,
+            contrastive_search_body_fn,
+            (generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables),
+            maximum_iterations=maximum_iterations,
+        )
+
+        # 6. prepare outputs
+        if not use_xla:
+            # cut for backward compatibility
+            generated = generated[:, :cur_len]
+
+        if return_dict_in_generate:
+            if self.config.is_encoder_decoder:
+                # if model is an encoder-decoder, retrieve encoder attention weights
+                # and hidden states
+                encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+                encoder_hidden_states = (
+                    model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+                )
+
+                scores = tuple(scores) if scores is not None else None
+                decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
+                cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
+                decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
+
+                return TFContrastiveSearchEncoderDecoderOutput(
+                    sequences=generated,
+                    scores=scores,
+                    encoder_attentions=encoder_attentions,
+                    encoder_hidden_states=encoder_hidden_states,
+                    decoder_attentions=decoder_attentions,
+                    cross_attentions=cross_attentions,
+                    decoder_hidden_states=decoder_hidden_states,
+                )
+            else:
+                return TFContrastiveSearchDecoderOnlyOutput(
+                    sequences=generated,
+                    scores=scores,
+                    attentions=decoder_attentions,
+                    hidden_states=decoder_hidden_states,
+                )
+        else:
+            return generated
+
+
+def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
+    """
+    Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+
+    Args:
+        logits: logits distribution shape (batch size, vocabulary size)
+        top_k (`int`, *optional*, defaults to 0):
+            If > 0, only keep the top k tokens with highest probability (top-k filtering)
+        top_p (`float`, *optional*, defaults to 1.0):
+            If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
+            filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens we keep per batch example in the output.
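+
+    Example (editor's illustrative sketch, not part of the original patch):
+
+    ```python
+    >>> import tensorflow as tf
+
+    >>> logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
+    >>> filtered = tf_top_k_top_p_filtering(logits, top_k=2)
+    >>> # the two smallest logits are now -inf, so only the top-2 tokens can be sampled
+    ```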
+
+    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+    """
+    logits_shape = shape_list(logits)
+
+    if top_k > 0:
+        top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1])  # Safety check
+        # Remove all tokens with a probability less than the last token of the top-k
+        indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None]
+        logits = tf.where(indices_to_remove, filter_value, logits)
+    if top_p < 1.0:
+        sorted_indices = tf.argsort(logits, direction="DESCENDING")
+        sorted_logits = tf.gather(
+            logits, sorted_indices, axis=-1, batch_dims=1
+        )  # expects logits to be of dim (batch_size, vocab_size)
+
+        cumulative_probs = tf.math.cumsum(stable_softmax(sorted_logits, axis=-1), axis=-1)
+
+        # Remove tokens with cumulative probability above the threshold (tokens with 0 are kept)
+        sorted_indices_to_remove = cumulative_probs > top_p
+
+        if min_tokens_to_keep > 1:
+            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
+            sorted_indices_to_remove = tf.concat(
+                [
+                    tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]),
+                    sorted_indices_to_remove[:, min_tokens_to_keep:],
+                ],
+                -1,
+            )
+
+        # Shift the indices to the right to keep also the first token above the threshold
+        sorted_indices_to_remove = tf.concat(
+            [tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, :-1]],
+            -1,
+        )
+        # scatter sorted tensors to original indexing
+        indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices)
+        logits = tf.where(indices_to_remove, filter_value, logits)
+    return logits
+
+
+def scatter_values_on_batch_indices(values, batch_indices):
+    shape = shape_list(batch_indices)
+    # broadcast batch dim to shape
+    broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1])
+    # transform batch_indices to pair_indices
+    pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
+    # scatter values to pair indices
+    return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape)
+
+
+def sample_without_replacement(logits, num_samples):
+    """
+    Categorical sampling without replacement is currently not implemented; the Gumbel-max trick will do for now. See
+    https://github.com/tensorflow/tensorflow/issues/9260 for more info.
+    """
+    z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)))
+    _, indices = tf.nn.top_k(logits + z, num_samples)
+    return indices
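+
+
+# Editor's sketch (illustrative, not part of the original patch): adding i.i.d. Gumbel noise to the
+# logits and taking the top-k realizes k distinct draws distributed according to softmax(logits).
+def _demo_sample_without_replacement():
+    # With these logits, index 0 is the most likely member of the two sampled ids.
+    logits = tf.math.log(tf.constant([[0.7, 0.2, 0.1]]))
+    return sample_without_replacement(logits, num_samples=2)  # e.g. [[0, 1]] or [[0, 2]]
+
+
+def _ranking_fast(
+    context_hidden: tf.Tensor,
+    next_hidden: tf.Tensor,
+    next_top_k_probs: tf.Tensor,
+    alpha: float,
+    beam_width: int,
+) -> tf.Tensor:
+    """
+    Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
+    in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
+    row in the batch.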
+ """ + norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True) + norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True) + cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1) + degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1) + next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1]) + contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty + contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width]) + selected_idx = tf.argmax(contrastive_score, axis=1) + return selected_idx diff --git a/modified/generation/utils.py b/modified/generation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1d413b3ab443de276cb7182ccb3bb88a74b9fbc0 --- /dev/null +++ b/modified/generation/utils.py @@ -0,0 +1,4968 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import inspect +import warnings +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.distributed as dist +from torch import nn + +from ..cache_utils import Cache, DynamicCache +from ..integrations.deepspeed import is_deepspeed_zero3_enabled +from ..modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput +from ..models.auto import ( + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, +) +from ..utils import ExplicitEnum, ModelOutput, is_accelerate_available, logging +from .beam_constraints import DisjunctiveConstraint, PhrasalConstraint +from .beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer +from .configuration_utils import GenerationConfig +from .logits_process import ( + EncoderNoRepeatNGramLogitsProcessor, + EncoderRepetitionPenaltyLogitsProcessor, + EpsilonLogitsWarper, + EtaLogitsWarper, + ExponentialDecayLengthPenalty, + ForcedBOSTokenLogitsProcessor, + ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, + HammingDiversityLogitsProcessor, + InfNanRemoveLogitsProcessor, + LogitNormalization, + LogitsProcessorList, + MinLengthLogitsProcessor, + MinNewTokensLengthLogitsProcessor, + NoBadWordsLogitsProcessor, + NoRepeatNGramLogitsProcessor, + PrefixConstrainedLogitsProcessor, + RepetitionPenaltyLogitsProcessor, + SequenceBiasLogitsProcessor, + SuppressTokensAtBeginLogitsProcessor, + SuppressTokensLogitsProcessor, + TemperatureLogitsWarper, + TopKLogitsWarper, + TopPLogitsWarper, + TypicalLogitsWarper, + UnbatchedClassifierFreeGuidanceLogitsProcessor, +) +from .stopping_criteria import ( + MaxLengthCriteria, + MaxTimeCriteria, + StoppingCriteria, + StoppingCriteriaList, + 
validate_stopping_criteria,
+)
+
+
+if TYPE_CHECKING:
+    from ..modeling_utils import PreTrainedModel
+    from .streamers import BaseStreamer
+
+logger = logging.get_logger(__name__)
+
+if is_accelerate_available():
+    from accelerate.hooks import AlignDevicesHook, add_hook_to_module
+
+
+@dataclass
+class GreedySearchDecoderOnlyOutput(ModelOutput):
+    """
+    Base class for outputs of decoder-only generation models using greedy search.
+
+
+    Args:
+        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+            if all batches finished early due to the `eos_token_id`.
+        scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+            at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+            each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+        hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
+        past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            NOTE: some models have a different `past_key_values` format, confirm with the model's documentation.
+            Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value
+            tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
+            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
+            encoder_sequence_length, embed_size_per_head)`.
+    """
+
+    sequences: torch.LongTensor = None
+    scores: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+    hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+    past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
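+
+
+# Editor's note (illustrative sketch, not part of the original patch): these dataclasses are the
+# objects returned by `generate(..., return_dict_in_generate=True)`. For instance:
+#
+#     out = model.generate(input_ids, return_dict_in_generate=True, output_scores=True)
+#     out.sequences  # (batch_size, sequence_length) generated token ids
+#     out.scores[0]  # (batch_size, vocab_size) scores of the first generated token
+
+
+@dataclass
+class ContrastiveSearchEncoderDecoderOutput(ModelOutput):
+    """
+    Base class for outputs of encoder-decoder generation models using contrastive search.
+
+    Args:
+        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+            if all batches finished early due to the `eos_token_id`.
+        scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+            at each generation step. 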
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + scores: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class ContrastiveSearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using contrastive search. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. 
+        scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+            at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+            each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+        hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+            `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
+        past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            NOTE: some models have a different `past_key_values` format, confirm with the model's documentation.
+            Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value
+            tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
+            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
+            encoder_sequence_length, embed_size_per_head)`.
+    """
+
+    sequences: torch.LongTensor = None
+    scores: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+    hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+    past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
+
+
+@dataclass
+class GreedySearchEncoderDecoderOutput(ModelOutput):
+    """
+    Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
+    weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
+    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes)
+
+
+    Args:
+        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+            if all batches finished early due to the `eos_token_id`.
+        scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+            Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+            at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+            each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape `(batch_size, num_heads,
+            sequence_length, sequence_length)`.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + scores: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class SampleDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using sampling. + + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. 
+ attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, + sequence_length)`. + hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + scores: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class SampleEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of + the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states + attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape + `(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`. 
+ decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, + sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + scores: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class BeamSearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using beam search. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. 
+ beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + sequences_scores: Optional[torch.FloatTensor] = None + scores: Optional[Tuple[torch.FloatTensor]] = None + beam_indices: Optional[torch.LongTensor] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class BeamSearchEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights + of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states + attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. 
+ beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length, + sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + sequences_scores: Optional[torch.FloatTensor] = None + scores: Optional[Tuple[torch.FloatTensor]] = None + beam_indices: Optional[torch.LongTensor] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class BeamSampleDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using beam sample. 
+ + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`torch.FloatTensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. + beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + sequences_scores: Optional[torch.FloatTensor] = None + scores: Optional[Tuple[torch.FloatTensor]] = None + beam_indices: Optional[torch.LongTensor] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class BeamSampleEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using beam sampling. 
Hidden states and attention + weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the + encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_beams, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`torch.FloatTensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`). + beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size*num_beams, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. 
+ Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + sequences_scores: Optional[torch.FloatTensor] = None + scores: Optional[Tuple[torch.FloatTensor]] = None + beam_indices: Optional[torch.LongTensor] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput] +SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput] +BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput] +BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput] +ContrastiveSearchOutput = Union[ContrastiveSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput] +GenerateOutput = Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput] + + +class GenerationMode(ExplicitEnum): + """ + Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method. + """ + + # Non-beam methods + CONTRASTIVE_SEARCH = "contrastive_search" + GREEDY_SEARCH = "greedy_search" + SAMPLE = "sample" + ASSISTED_GENERATION = "assisted_generation" + # Beam methods + BEAM_SEARCH = "beam_search" + BEAM_SAMPLE = "beam_sample" + CONSTRAINED_BEAM_SEARCH = "constrained_beam_search" + GROUP_BEAM_SEARCH = "group_beam_search" + + +class GenerationMixin: + """ + A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`]. + + The class exposes [`~generation.GenerationMixin.generate`], which can be used for: + - *greedy decoding* by calling [`~generation.GenerationMixin.greedy_search`] if `num_beams=1` and + `do_sample=False` + - *contrastive search* by calling [`~generation.GenerationMixin.contrastive_search`] if `penalty_alpha>0` and + `top_k>1` + - *multinomial sampling* by calling [`~generation.GenerationMixin.sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.GenerationMixin.beam_search`] if `num_beams>1` and + `do_sample=False` + - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin.beam_sample`] if `num_beams>1` + and `do_sample=True` + - *diverse beam-search decoding* by calling [`~generation.GenerationMixin.group_beam_search`], if `num_beams>1` + and `num_beam_groups>1` + - *constrained beam-search decoding* by calling [`~generation.GenerationMixin.constrained_beam_search`], if + `constraints!=None` or `force_words_ids!=None` + + You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To + learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). 
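+
+    For example (editor's illustrative sketch; the checkpoint name is a placeholder):
+
+    ```python
+    >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+    >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+    >>> inputs = tokenizer("The capital of France is", return_tensors="pt")
+    >>> model.generate(**inputs)  # greedy decoding (num_beams=1, do_sample=False)
+    >>> model.generate(**inputs, do_sample=True, top_k=50)  # multinomial sampling
+    >>> model.generate(**inputs, num_beams=4)  # beam-search decoding
+    >>> model.generate(**inputs, penalty_alpha=0.6, top_k=4)  # contrastive search
+    ```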
+ """ + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + "A model class needs to define a `prepare_inputs_for_generation` method in order to use `.generate()`." + ) + + def _prepare_model_inputs( + self, + inputs: Optional[torch.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: + """ + This function extracts the model-specific `inputs` for generation. + """ + # 1. retrieve all kwargs that are non-None or non-model input related. + # some encoder-decoder models have different names for model and encoder + if ( + self.config.is_encoder_decoder + and hasattr(self, "encoder") + and self.encoder.main_input_name != self.main_input_name + ): + input_name = self.encoder.main_input_name + else: + input_name = self.main_input_name + + model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} + + # 2. check whether model_input_name is passed as kwarg + # if yes and `inputs` is None use kwarg inputs + inputs_kwarg = model_kwargs.pop(input_name, None) + if inputs_kwarg is not None and inputs is not None: + raise ValueError( + f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. " + f"Make sure to either pass {inputs} or {input_name}=..." + ) + elif inputs_kwarg is not None: + inputs = inputs_kwarg + + # 3. In the presence of `inputs_embeds` for text models: + # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model + # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with + # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`) + # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and + # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states. + if input_name == "input_ids" and "inputs_embeds" in model_kwargs: + if not self.config.is_encoder_decoder: + has_inputs_embeds_forwarding = "inputs_embeds" in set( + inspect.signature(self.prepare_inputs_for_generation).parameters.keys() + ) + if not has_inputs_embeds_forwarding: + raise ValueError( + f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} " + "doesn't have its forwarding implemented. See the GPT2 implementation for an example " + "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!" + ) + # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of + # the attention mask) can rely on the actual model input. + model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( + inputs, bos_token_id, model_kwargs=model_kwargs + ) + else: + if inputs is not None: + raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.") + inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" + + # 4. 
if `inputs` is still None, try to create `input_ids` from BOS token + inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) + return inputs, input_name, model_kwargs + + def _maybe_initialize_input_ids_for_generation( + self, + inputs: Optional[torch.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> torch.LongTensor: + """Initializes input ids for generation, if necessary.""" + if inputs is not None: + return inputs + + encoder_outputs = model_kwargs.get("encoder_outputs") + if self.config.is_encoder_decoder and encoder_outputs is not None: + # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding + shape = encoder_outputs.last_hidden_state.size()[:-1] + return torch.ones(shape, dtype=torch.long, device=self.device) * -100 + + if bos_token_id is None: + raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") + + # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with + # soft-prompting or in multimodal implementations built on top of decoder-only language models. + batch_size = 1 + for value in model_kwargs.values(): + if isinstance(value, torch.Tensor): + batch_size = value.shape[0] + break + return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id + + def _prepare_attention_mask_for_generation( + self, + inputs: torch.Tensor, + pad_token_id: Optional[int], + eos_token_id: Optional[Union[int, List[int]]], + ) -> torch.LongTensor: + is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] + is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id not in eos_token_id) + + # Check if input is input_ids and padded -> only then is attention_mask defined + if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: + return inputs.ne(pad_token_id).long() + else: + return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device) + + def _prepare_encoder_decoder_kwargs_for_generation( + self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None + ) -> Dict[str, Any]: + # 1. get encoder + encoder = self.get_encoder() + # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device + # as the inputs. + if hasattr(self, "hf_device_map"): + if hasattr(encoder, "_hf_hook"): + encoder._hf_hook.io_same_device = True + else: + add_hook_to_module(encoder, AlignDevicesHook(io_same_device=True)) + + # 2. Prepare encoder args and encoder kwargs from model kwargs. + irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not any(argument.startswith(p) for p in irrelevant_prefix) + } + encoder_signature = set(inspect.signature(encoder.forward).parameters) + encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature + if not encoder_accepts_wildcard: + encoder_kwargs = { + argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature + } + + # 3. 
make sure that encoder returns `ModelOutput` + model_input_name = model_input_name if model_input_name is not None else self.main_input_name + encoder_kwargs["return_dict"] = True + encoder_kwargs[model_input_name] = inputs_tensor + model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) + + return model_kwargs + + def _prepare_decoder_input_ids_for_generation( + self, + batch_size: int, + model_input_name: str, + model_kwargs: Dict[str, torch.Tensor], + decoder_start_token_id: int = None, + bos_token_id: int = None, + device: torch.device = None, + ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]: + """Prepares `decoder_input_ids` for generation with encoder-decoder models""" + # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, + # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. + if model_kwargs is not None and "decoder_input_ids" in model_kwargs: + decoder_input_ids = model_kwargs.pop("decoder_input_ids") + elif "input_ids" in model_kwargs and model_input_name != "input_ids": + decoder_input_ids = model_kwargs.pop("input_ids") + else: + decoder_input_ids = None + + # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. + decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + if device is None: + device = self.device + decoder_input_ids_start = torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id + + # no user input -> use decoder_start_token_id as decoder_input_ids + if decoder_input_ids is None: + decoder_input_ids = decoder_input_ids_start + # exception: Donut checkpoints have task-specific decoder starts and don't expect a BOS token + elif self.config.model_type == "vision-encoder-decoder" and "donut" in self.name_or_path.lower(): + pass + # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust + # decoder_attention_mask if provided) + elif (decoder_input_ids[:, 0] != decoder_start_token_id).all().item(): + decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + decoder_attention_mask = torch.cat( + (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), + dim=-1, + ) + model_kwargs["decoder_attention_mask"] = decoder_attention_mask + + return decoder_input_ids, model_kwargs + + def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id is not None + else self.generation_config.decoder_start_token_id + ) + bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id + + if decoder_start_token_id is not None: + return decoder_start_token_id + elif bos_token_id is not None: + return bos_token_id + raise ValueError( + "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." + ) + + @staticmethod + def _expand_inputs_for_generation( + expand_size: int = 1, + is_encoder_decoder: bool = False, + input_ids: Optional[torch.LongTensor] = None, + **model_kwargs, + ) -> Tuple[torch.LongTensor, Dict[str, Any]]: + """Expands tensors from [batch_size, ...] 
to [batch_size * expand_size, ...]""" + + def _expand_dict_for_generation(dict_to_expand): + for key in dict_to_expand: + if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], torch.Tensor): + dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) + return dict_to_expand + + if input_ids is not None: + input_ids = input_ids.repeat_interleave(expand_size, dim=0) + + model_kwargs = _expand_dict_for_generation(model_kwargs) + + if is_encoder_decoder: + if model_kwargs.get("encoder_outputs") is None: + raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") + model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) + + return input_ids, model_kwargs + + def _extract_past_from_model_output(self, outputs: ModelOutput, standardize_cache_format: bool = False): + past_key_values = None + if "past_key_values" in outputs: + past_key_values = outputs.past_key_values + elif "mems" in outputs: + past_key_values = outputs.mems + elif "past_buckets_states" in outputs: + past_key_values = outputs.past_buckets_states + + # Bloom fix: standardizes the cache format when requested + if standardize_cache_format and hasattr(self, "_convert_to_standard_cache"): + batch_size = outputs.logits.shape[0] + past_key_values = self._convert_to_standard_cache(past_key_values, batch_size=batch_size) + return past_key_values + + def _update_model_kwargs_for_generation( + self, + outputs: ModelOutput, + model_kwargs: Dict[str, Any], + is_encoder_decoder: bool = False, + standardize_cache_format: bool = False, + ) -> Dict[str, Any]: + # update past_key_values + model_kwargs["past_key_values"] = self._extract_past_from_model_output( + outputs, standardize_cache_format=standardize_cache_format + ) + if getattr(outputs, "state", None) is not None: + model_kwargs["state"] = outputs.state + + # update token_type_ids with last value + if "token_type_ids" in model_kwargs: + token_type_ids = model_kwargs["token_type_ids"] + model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) + + if not is_encoder_decoder: + # update attention mask + if "attention_mask" in model_kwargs: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = torch.cat( + [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 + ) + else: + # update decoder attention mask + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + model_kwargs["decoder_attention_mask"] = torch.cat( + [decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))], + dim=-1, + ) + + return model_kwargs + + def _reorder_cache(self, past_key_values, beam_idx): + raise NotImplementedError( + f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to" + f" enable beam search for {self.__class__}" + ) + + def _get_logits_warper( + self, + generation_config: GenerationConfig, + ) -> LogitsProcessorList: + """ + This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances + used for multinomial sampling. + """ + + # instantiate warpers list + warpers = LogitsProcessorList() + + # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a + # better score (i.e. 
keep len(list(generation_config.eos_token_id)) + 1) + if generation_config.num_beams > 1: + if isinstance(generation_config.eos_token_id, list): + min_tokens_to_keep = len(generation_config.eos_token_id) + 1 + else: + min_tokens_to_keep = 2 + else: + min_tokens_to_keep = 1 + + # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files + # all samplers can be found in `generation_utils_samplers.py` + if generation_config.temperature is not None and generation_config.temperature != 1.0: + warpers.append(TemperatureLogitsWarper(generation_config.temperature)) + if generation_config.top_k is not None and generation_config.top_k != 0: + warpers.append(TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep)) + if generation_config.top_p is not None and generation_config.top_p < 1.0: + warpers.append(TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep)) + if generation_config.typical_p is not None and generation_config.typical_p < 1.0: + warpers.append( + TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep) + ) + if generation_config.epsilon_cutoff is not None and 0.0 < generation_config.epsilon_cutoff < 1.0: + warpers.append( + EpsilonLogitsWarper(epsilon=generation_config.epsilon_cutoff, min_tokens_to_keep=min_tokens_to_keep) + ) + if generation_config.eta_cutoff is not None and 0.0 < generation_config.eta_cutoff < 1.0: + warpers.append( + EtaLogitsWarper(epsilon=generation_config.eta_cutoff, min_tokens_to_keep=min_tokens_to_keep) + ) + # `LogitNormalization` should always be the last logit processor, when present + if generation_config.renormalize_logits is True: + warpers.append(LogitNormalization()) + return warpers + + def _get_generation_mode( + self, generation_config: GenerationConfig, assistant_model: Optional["PreTrainedModel"] + ) -> GenerationMode: + """ + Returns the generation mode triggered by a [`GenerationConfig`] instance. + """ + if generation_config.constraints is not None or generation_config.force_words_ids is not None: + generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH + elif generation_config.num_beams == 1: + if generation_config.do_sample is False: + if ( + generation_config.top_k is not None + and generation_config.top_k > 1 + and generation_config.penalty_alpha is not None + and generation_config.penalty_alpha > 0 + ): + generation_mode = GenerationMode.CONTRASTIVE_SEARCH + else: + generation_mode = GenerationMode.GREEDY_SEARCH + else: + generation_mode = GenerationMode.SAMPLE + else: + if generation_config.num_beam_groups > 1: + generation_mode = GenerationMode.GROUP_BEAM_SEARCH + elif generation_config.do_sample is True: + generation_mode = GenerationMode.BEAM_SAMPLE + else: + generation_mode = GenerationMode.BEAM_SEARCH + + # Assisted generation may extend some generation modes + if assistant_model is not None: + if generation_mode in ("greedy_search", "sample"): + generation_mode = GenerationMode.ASSISTED_GENERATION + else: + raise ValueError( + "You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate " + "is only supported with Greedy Search and Sample." 
+ ) + return generation_mode + + def _get_logits_processor( + self, + generation_config: GenerationConfig, + input_ids_seq_length: int, + encoder_input_ids: torch.LongTensor, + prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], + logits_processor: Optional[LogitsProcessorList], + model_kwargs: Optional[Dict[str, Any]] = None, + negative_prompt_ids: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + ) -> LogitsProcessorList: + """ + This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] + instances used to modify the scores of the language model head. + """ + # instantiate processors list + processors = LogitsProcessorList() + + if generation_config.guidance_scale is not None and generation_config.guidance_scale != 1: + processors.append( + UnbatchedClassifierFreeGuidanceLogitsProcessor( + generation_config.guidance_scale, + self, + unconditional_ids=negative_prompt_ids, + unconditional_attention_mask=negative_prompt_attention_mask, + use_cache=model_kwargs["use_cache"], + ) + ) + if generation_config.sequence_bias is not None: + processors.append(SequenceBiasLogitsProcessor(sequence_bias=generation_config.sequence_bias)) + + if generation_config.diversity_penalty is not None and generation_config.diversity_penalty > 0.0: + processors.append( + HammingDiversityLogitsProcessor( + diversity_penalty=generation_config.diversity_penalty, + num_beams=generation_config.num_beams, + num_beam_groups=generation_config.num_beam_groups, + ) + ) + if ( + generation_config.encoder_repetition_penalty is not None + and generation_config.encoder_repetition_penalty != 1.0 + ): + processors.append( + EncoderRepetitionPenaltyLogitsProcessor( + penalty=generation_config.encoder_repetition_penalty, encoder_input_ids=encoder_input_ids + ) + ) + if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: + processors.append(RepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) + if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: + processors.append(NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) + if ( + generation_config.encoder_no_repeat_ngram_size is not None + and generation_config.encoder_no_repeat_ngram_size > 0 + ): + processors.append( + EncoderNoRepeatNGramLogitsProcessor(generation_config.encoder_no_repeat_ngram_size, encoder_input_ids) + ) + if generation_config.bad_words_ids is not None: + processors.append( + NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) + ) + if ( + generation_config.min_length is not None + and generation_config.eos_token_id is not None + and generation_config.min_length > 0 + ): + processors.append(MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) + if ( + generation_config.min_new_tokens is not None + and generation_config.eos_token_id is not None + and generation_config.min_new_tokens > 0 + ): + processors.append( + MinNewTokensLengthLogitsProcessor( + input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id + ) + ) + if prefix_allowed_tokens_fn is not None: + processors.append( + PrefixConstrainedLogitsProcessor( + prefix_allowed_tokens_fn, generation_config.num_beams // generation_config.num_beam_groups + ) + ) + if generation_config.forced_bos_token_id is not None: + 
processors.append(ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) + if generation_config.forced_eos_token_id is not None: + processors.append( + ForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) + ) + if generation_config.remove_invalid_values is True: + processors.append(InfNanRemoveLogitsProcessor()) + if generation_config.exponential_decay_length_penalty is not None: + processors.append( + ExponentialDecayLengthPenalty( + generation_config.exponential_decay_length_penalty, + generation_config.eos_token_id, + input_ids_seq_length, + ) + ) + if generation_config.suppress_tokens is not None: + processors.append(SuppressTokensLogitsProcessor(generation_config.suppress_tokens)) + if generation_config.begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = ( + begin_index + if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) + else begin_index + 1 + ) + if generation_config.forced_decoder_ids is not None: + # generation starts after the last token that is forced + begin_index += generation_config.forced_decoder_ids[-1][0] + processors.append( + SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) + ) + if generation_config.forced_decoder_ids is not None: + processors.append(ForceTokensLogitsProcessor(generation_config.forced_decoder_ids)) + processors = self._merge_criteria_processor_list(processors, logits_processor) + # `LogitNormalization` should always be the last logit processor, when present + if generation_config.renormalize_logits is True: + processors.append(LogitNormalization()) + return processors + + def _get_stopping_criteria( + self, generation_config: GenerationConfig, stopping_criteria: Optional[StoppingCriteriaList] + ) -> StoppingCriteriaList: + criteria = StoppingCriteriaList() + if generation_config.max_length is not None: + max_position_embeddings = getattr(self.config, "max_position_embeddings", None) + criteria.append( + MaxLengthCriteria( + max_length=generation_config.max_length, + max_position_embeddings=max_position_embeddings, + ) + ) + if generation_config.max_time is not None: + criteria.append(MaxTimeCriteria(max_time=generation_config.max_time)) + criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) + return criteria + + def _merge_criteria_processor_list( + self, + default_list: Union[LogitsProcessorList, StoppingCriteriaList], + custom_list: Union[LogitsProcessorList, StoppingCriteriaList], + ) -> Union[LogitsProcessorList, StoppingCriteriaList]: + if len(custom_list) == 0: + return default_list + for default in default_list: + for custom in custom_list: + if type(custom) is type(default): + object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" + raise ValueError( + f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" + f" `.generate()`, but it has already been created with the values {default}. {default} has been" + " created by passing the corresponding arguments to generate or by the model's config default" + f" values. If you just want to change the default values of {object_type} consider passing" + f" them as arguments to `.generate()` instead of using a custom {object_type}." 
+ )
+ default_list.extend(custom_list)
+ return default_list
+
+ def compute_transition_scores(
+ self,
+ sequences: torch.Tensor,
+ scores: Tuple[torch.Tensor],
+ beam_indices: Optional[torch.Tensor] = None,
+ normalize_logits: bool = False,
+ ) -> torch.Tensor:
+ """
+ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
+ used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
+
+ Parameters:
+ sequences (`torch.LongTensor`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
+ shorter if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(torch.FloatTensor)`):
+ Transition scores for each vocabulary token at each generation step, consisting of the log
+ probabilities of tokens conditioned on the log softmax of previously generated tokens. Tuple of
+ `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with
+ each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`torch.LongTensor`, *optional*):
+ Beam indices of the generated token id at each generation step. `torch.LongTensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` was used at
+ generate-time.
+ normalize_logits (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
+
+ Return:
+ `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
+ the transition scores (logits).
+
+ Examples:
+
+ ```python
+ >>> from transformers import GPT2Tokenizer, AutoModelForCausalLM
+ >>> import numpy as np
+
+ >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+ >>> tokenizer.pad_token_id = tokenizer.eos_token_id
+ >>> inputs = tokenizer(["Today is"], return_tensors="pt")
+
+ >>> # Example 1: Print the scores for each token generated with Greedy Search
+ >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
+ >>> transition_scores = model.compute_transition_scores(
+ ...     outputs.sequences, outputs.scores, normalize_logits=True
+ ... )
+ >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
+ >>> # encoder-decoder models, like BART or T5.
+ >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
+ >>> generated_tokens = outputs.sequences[:, input_length:]
+ >>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
+ ...     # | token | token string | logits | probability
+ ...     print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
+ |   262 |  the     | -1.414 | 24.33%
+ |  1110 | day      | -2.609 | 7.36%
+ |   618 |  when    | -2.010 | 13.40%
+ |   356 |  we      | -1.859 | 15.58%
+ |   460 |  can     | -2.508 | 8.14%
+
+ >>> # Example 2: Reconstruct the sequence scores from Beam Search
+ >>> outputs = model.generate(
+ ...     **inputs,
+ ...     max_new_tokens=5,
+ ...     num_beams=4,
+ ...     num_return_sequences=4,
+ ...     return_dict_in_generate=True,
+ ...     output_scores=True,
+ ... )
+ >>> transition_scores = model.compute_transition_scores(
+ ...     outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
+ ... )
+ >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
+ >>> # Tip 1: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
+ >>> # use case, you might want to recompute it with `normalize_logits=True`.
+ >>> # Tip 2: the output length does NOT include the input length
+ >>> output_length = np.sum(transition_scores.numpy() < 0, axis=1)
+ >>> length_penalty = model.generation_config.length_penalty
+ >>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty)
+ >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
+ True
+ ```"""
+ # 1. In the absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
+ # to a beam search approach where the first (and only) beam is always selected
+ if beam_indices is None:
+ beam_indices = torch.arange(scores[0].shape[0]).view(-1, 1).to(sequences.device)
+ beam_indices = beam_indices.expand(-1, len(scores))
+
+ # 2. reshape scores as [batch_size*vocab_size, # generation steps] with # generation steps being
+ # seq_len - input_length
+ scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1)
+
+ # 3. Optionally normalize the logits (across the vocab dimension)
+ if normalize_logits:
+ scores = scores.reshape(-1, self.config.vocab_size, scores.shape[-1])
+ scores = torch.nn.functional.log_softmax(scores, dim=1)
+ scores = scores.reshape(-1, scores.shape[-1])
+
+ # 4. cut beam_indices to the longest beam length
+ beam_indices_mask = beam_indices < 0
+ max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
+ beam_indices = beam_indices.clone()[:, :max_beam_length]
+ beam_indices_mask = beam_indices_mask[:, :max_beam_length]
+
+ # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
+ beam_indices[beam_indices_mask] = 0
+
+ # 6. multiply beam_indices with vocab size to gather correctly from scores
+ beam_sequence_indices = beam_indices * self.config.vocab_size
+
+ # 7. Define which indices contributed to scores
+ cut_idx = sequences.shape[-1] - max_beam_length
+ indices = sequences[:, cut_idx:] + beam_sequence_indices
+
+ # 8. Compute scores
+ transition_scores = scores.gather(0, indices)
+
+ # 9. Mask out transition_scores of beams that stopped early
+ transition_scores[beam_indices_mask] = 0
+
+ return transition_scores
+
+ def _validate_model_class(self):
+ """
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
+ right class to use.
+ """
+ if not self.can_generate():
+ generate_compatible_mappings = [
+ MODEL_FOR_CAUSAL_LM_MAPPING,
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
+ MODEL_FOR_VISION_2_SEQ_MAPPING,
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ ]
+ generate_compatible_classes = set()
+ for model_mapping in generate_compatible_mappings:
+ supported_models = model_mapping.get(type(self.config), default=None)
+ if supported_models is not None:
+ generate_compatible_classes.add(supported_models.__name__)
+ exception_message = (
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
+ "it doesn't have a language model head."
+ ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. Generate argument typos will also be caught here.""" + # If a `Cache` instance is passed, checks whether the model is compatible with it + if isinstance(model_kwargs.get("past_key_values", None), Cache) and not self._supports_cache_class: + raise ValueError( + f"{self.__class__.__name__} does not support an instance of `Cache` as `past_key_values`. Please " + "check the model documentation for supported cache formats." + ) + + # Excludes arguments that are handled before calling any model function + if self.config.is_encoder_decoder: + for key in ["decoder_input_ids"]: + model_kwargs.pop(key, None) + + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) + if "kwargs" in model_args or "model_kwargs" in model_args: + model_args |= set(inspect.signature(self.forward).parameters) + + # Encoder-Decoder models may also need Encoder arguments from `model_kwargs` + if self.config.is_encoder_decoder: + base_model = getattr(self, self.base_model_prefix, None) + + # allow encoder kwargs + encoder = getattr(self, "encoder", None) + # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`. + # Also, it has `base_model_prefix = "encoder_decoder"` but there is no `self.encoder_decoder` + # TODO: A better way to handle this. + if encoder is None and base_model is not None: + encoder = getattr(base_model, "encoder", None) + + if encoder is not None: + encoder_model_args = set(inspect.signature(encoder.forward).parameters) + model_args |= encoder_model_args + + # allow decoder kwargs + decoder = getattr(self, "decoder", None) + if decoder is None and base_model is not None: + decoder = getattr(base_model, "decoder", None) + + if decoder is not None: + decoder_model_args = set(inspect.signature(decoder.forward).parameters) + model_args |= {f"decoder_{x}" for x in decoder_model_args} + + # allow assistant_encoder_outputs to be passed if we're doing assisted generating + if "assistant_encoder_outputs" in model_kwargs: + model_args |= {"assistant_encoder_outputs"} + + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + + def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length): + """Performs validation related to the resulting generated length""" + + # 1. Max length warnings related to poor parameterization + if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: + # 20 is the default max_length of the generation config + warnings.warn( + f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the " + "generation length. 
We recommend setting `max_new_tokens` to control the maximum length of the "
+ "generation.",
+ UserWarning,
+ )
+ if input_ids_length >= generation_config.max_length:
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+ warnings.warn(
+ f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to"
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+ " increasing `max_new_tokens`.",
+ UserWarning,
+ )
+
+ # 2. Min length warnings due to unfeasible parameter combinations
+ min_length_error_suffix = (
+ " Generation will stop at the defined maximum length. You should decrease the minimum length and/or "
+ "increase the maximum length."
+ )
+ if has_default_max_length:
+ min_length_error_suffix += (
+ f" Note that `max_length` is set to {generation_config.max_length}, its default value."
+ )
+ if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
+ warnings.warn(
+ f"Unfeasible length constraints: `min_length` ({generation_config.min_length}) is larger than"
+ f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
+ UserWarning,
+ )
+ if generation_config.min_new_tokens is not None:
+ min_length = generation_config.min_new_tokens + input_ids_length
+ if min_length > generation_config.max_length:
+ warnings.warn(
+ f"Unfeasible length constraints: `min_new_tokens` ({generation_config.min_new_tokens}), when "
+ f"added to the prompt length ({input_ids_length}), is larger than"
+ f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
+ UserWarning,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
+ synced_gpus: Optional[bool] = None,
+ assistant_model: Optional["PreTrainedModel"] = None,
+ streamer: Optional["BaseStreamer"] = None,
+ negative_prompt_ids: Optional[torch.Tensor] = None,
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Union[GenerateOutput, torch.LongTensor]:
+ r"""
+
+ Generates sequences of token ids for models with a language modeling head.
+
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](../generation_strategies).
+
+
+ Parameters:
+ inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models, `inputs`
+ should be in the format of `input_ids`. For encoder-decoder models, *inputs* can represent any of
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logits processor is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ generation config. If a stopping criterion is passed that is already created with the arguments or a
+ generation config, an error is thrown. If your stopping criteria depend on the `scores` input, make
+ sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`. This feature is
+ intended for advanced users.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
+ provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
+ `input_ids`. It has to return a list with the allowed tokens for the next generation step, conditioned
+ on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ synced_gpus (`bool`, *optional*):
+ Whether to continue running the while loop until max_length. Unless overridden, this flag will be set to
+ `True` in a DeepSpeed ZeRO Stage 3 multi-GPU environment to avoid hanging if one GPU finishes
+ generating before the others. Otherwise it'll be set to `False`.
+ assistant_model (`PreTrainedModel`, *optional*):
+ An assistant model that can be used to accelerate generation. The assistant model must have the exact
+ same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model
+ is much faster than running generation with the model you're calling generate from. As such, the
+ assistant model should be much smaller.
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ negative_prompt_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The negative prompt needed for some processors such as CFG. The batch size must match the input batch
+ size. This is an experimental feature, subject to breaking API changes in future versions.
+ negative_prompt_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Attention mask for `negative_prompt_ids`.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.
+
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GreedySearchDecoderOnlyOutput`],
+ - [`~generation.SampleDecoderOnlyOutput`],
+ - [`~generation.BeamSearchDecoderOnlyOutput`],
+ - [`~generation.BeamSampleDecoderOnlyOutput`]
+
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GreedySearchEncoderDecoderOutput`],
+ - [`~generation.SampleEncoderDecoderOutput`],
+ - [`~generation.BeamSearchEncoderDecoderOutput`],
+ - [`~generation.BeamSampleEncoderDecoderOutput`]
+ """
+
+ if synced_gpus is None:
+ if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:
+ synced_gpus = True
+ else:
+ synced_gpus = False
+
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
+ self._validate_model_class()
+
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
+ if generation_config is None:
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
+ # two conditions must be met
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
+ # 2) the generation config must have seen no modification since its creation (the hash is the same).
+ if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
+ self.generation_config
+ ):
+ new_generation_config = GenerationConfig.from_model_config(self.config)
+ if new_generation_config != self.generation_config:
+ warnings.warn(
+ "You have modified the pretrained model configuration to control generation. This is a"
+ " deprecated strategy to control generation and will be removed soon, in a future version."
+ " Please use and modify the model generation configuration (see"
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
+ )
+ self.generation_config = new_generation_config
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
+ generation_config.validate()
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ # 2. Set generation parameters if not already defined
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
+ if model_kwargs.get("attention_mask", None) is None:
+ logger.warning(
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
+ ) + eos_token_id = generation_config.eos_token_id + if isinstance(eos_token_id, list): + eos_token_id = eos_token_id[0] + logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") + generation_config.pad_token_id = eos_token_id + + # 3. Define model inputs + # inputs_tensor has to be defined + # model_input_name is defined if model-specific keyword input is passed + # otherwise model_input_name is None + # all model-specific keyword inputs are removed from `model_kwargs` + inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( + inputs, generation_config.bos_token_id, model_kwargs + ) + batch_size = inputs_tensor.shape[0] + + # 4. Define other model kwargs + model_kwargs["output_attentions"] = generation_config.output_attentions + model_kwargs["output_hidden_states"] = generation_config.output_hidden_states + # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are + # generating the first new token or not, and we only want to use the embeddings for the first new token) + if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds": + model_kwargs["use_cache"] = True + else: + model_kwargs["use_cache"] = generation_config.use_cache + + accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys()) + requires_attention_mask = "encoder_outputs" not in model_kwargs + + if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: + model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( + inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id + ) + + # decoder-only models should use left-padding for generation + if not self.config.is_encoder_decoder: + # If `input_ids` was given, check if the last id in any sequence is `pad_token_id` + # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off. + if ( + generation_config.pad_token_id is not None + and len(inputs_tensor.shape) == 2 + and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0 + ): + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." + ) + + if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: + # if model is encoder decoder encoder_outputs are created + # and added to `model_kwargs` + model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( + inputs_tensor, model_kwargs, model_input_name + ) + + # 5. Prepare `input_ids` which will be used for auto-regressive generation + if self.config.is_encoder_decoder: + input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( + batch_size=batch_size, + model_input_name=model_input_name, + model_kwargs=model_kwargs, + decoder_start_token_id=generation_config.decoder_start_token_id, + bos_token_id=generation_config.bos_token_id, + device=inputs_tensor.device, + ) + else: + input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids") + + if streamer is not None: + streamer.put(input_ids.cpu()) + + # 6. Prepare `max_length` depending on other stopping criteria. 
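+ # (Clarifying note added in review, not upstream: when `max_new_tokens` is set, the effective
+ # `max_length` computed below becomes `input_ids_length + max_new_tokens`; e.g. a 10-token prompt
+ # with `max_new_tokens=32` yields `max_length=42`, which `MaxLengthCriteria` then enforces.)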
+ input_ids_length = input_ids.shape[-1] + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if generation_config.max_new_tokens is not None: + if not has_default_max_length and generation_config.max_length is not None: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.max_length = generation_config.max_new_tokens + input_ids_length + self._validate_generated_length(generation_config, input_ids_length, has_default_max_length) + + # 7. determine generation mode + generation_mode = self._get_generation_mode(generation_config, assistant_model) + + if streamer is not None and (generation_config.num_beams > 1): + raise ValueError( + "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1." + ) + + if self.device.type != input_ids.device.type: + warnings.warn( + "You are calling .generate() with the `input_ids` being on a device type different" + f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model" + f" is on {self.device.type}. You may experience unexpected behaviors or slower generation." + " Please make sure that you have put `input_ids` to the" + f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before" + " running `.generate()`.", + UserWarning, + ) + + # 8. prepare distribution pre_processing samplers + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_length, + encoder_input_ids=inputs_tensor, + prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, + logits_processor=logits_processor, + model_kwargs=model_kwargs, + negative_prompt_ids=negative_prompt_ids, + negative_prompt_attention_mask=negative_prompt_attention_mask, + ) + + # 9. prepare stopping criteria + stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + # 10. go into different generation modes + if generation_mode == GenerationMode.ASSISTED_GENERATION: + if generation_config.num_return_sequences > 1: + raise ValueError( + "num_return_sequences has to be 1 when doing assisted generate, " + f"but is {generation_config.num_return_sequences}." + ) + if batch_size > 1: + raise ValueError("assisted generate is only supported for batch_size = 1") + if not model_kwargs["use_cache"]: + raise ValueError("assisted generate requires `use_cache=True`") + + assistant_accepts_encoder_outputs = "encoder_outputs" in set( + inspect.signature(assistant_model.forward).parameters.keys() + ) + + # 11. 
If the assistant model is an encoder-decoder, prepare its encoder outputs + if assistant_model.config.is_encoder_decoder and "assistant_encoder_outputs" not in model_kwargs: + assistant_model_kwargs = copy.deepcopy(model_kwargs) + inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs( + inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs + ) + assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation( + inputs_tensor, assistant_model_kwargs, model_input_name + ) + model_kwargs["assistant_encoder_outputs"] = assistant_model_kwargs["encoder_outputs"] + + if ( + not assistant_model.config.is_encoder_decoder + and assistant_accepts_encoder_outputs + and "encoder_outputs" in model_kwargs + ): + # some assistants might be assymetric (many more enc layers than dec layers) + # encoder-decoder models that share the exact same encoder as the teacher + # in this case the assistant only needs to load the light-weight decoder, + # but still requires `encoder_outputs` to be passed + model_kwargs["assistant_encoder_outputs"] = model_kwargs["encoder_outputs"] + + # 12. run assisted generate + return self.assisted_decoding( + input_ids, + assistant_model=assistant_model, + do_sample=generation_config.do_sample, + logits_processor=logits_processor, + logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + if generation_mode == GenerationMode.GREEDY_SEARCH: + # 11. run greedy search + return self.greedy_search( + input_ids, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH: + if not model_kwargs["use_cache"]: + raise ValueError("Contrastive search requires `use_cache=True`") + + return self.contrastive_search( + input_ids, + top_k=generation_config.top_k, + penalty_alpha=generation_config.penalty_alpha, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + sequential=generation_config.low_memory, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.SAMPLE: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config) + + # 12. expand input_ids with `num_return_sequences` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_return_sequences, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 13. 
run sample + return self.sample( + input_ids, + logits_processor=logits_processor, + logits_warper=logits_warper, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.BEAM_SEARCH: + # 11. prepare beam search scorer + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + # 12. interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + # 13. run beam search + return self.beam_search( + input_ids, + beam_scorer, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.BEAM_SAMPLE: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config) + + # 12. prepare beam search scorer + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + + # 13. interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 14. run beam sample + return self.beam_sample( + input_ids, + beam_scorer, + logits_processor=logits_processor, + logits_warper=logits_warper, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH: + # 11. prepare beam search scorer + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + num_beam_groups=generation_config.num_beam_groups, + max_length=generation_config.max_length, + ) + # 12. 
interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + # 13. run beam search + return self.group_beam_search( + input_ids, + beam_scorer, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH: + final_constraints = [] + if generation_config.constraints is not None: + final_constraints = generation_config.constraints + + if generation_config.force_words_ids is not None: + + def typeerror(): + raise ValueError( + "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]` " + f"of positive integers, but is {generation_config.force_words_ids}." + ) + + if ( + not isinstance(generation_config.force_words_ids, list) + or len(generation_config.force_words_ids) == 0 + ): + typeerror() + + for word_ids in generation_config.force_words_ids: + if isinstance(word_ids[0], list): + if not isinstance(word_ids, list) or len(word_ids) == 0: + typeerror() + if any(not isinstance(token_ids, list) for token_ids in word_ids): + typeerror() + if any( + any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) + for token_ids in word_ids + ): + typeerror() + + constraint = DisjunctiveConstraint(word_ids) + else: + if not isinstance(word_ids, list) or len(word_ids) == 0: + typeerror() + if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids): + typeerror() + + constraint = PhrasalConstraint(word_ids) + final_constraints.append(constraint) + + # 11. prepare beam search scorer + constrained_beam_scorer = ConstrainedBeamSearchScorer( + constraints=final_constraints, + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + # 12. interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + # 13. 
run beam search + return self.constrained_beam_search( + input_ids, + constrained_beam_scorer=constrained_beam_scorer, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + @torch.no_grad() + def contrastive_search( + self, + input_ids: torch.LongTensor, + top_k: Optional[int] = 1, + penalty_alpha: Optional[float] = 0, + logits_processor: Optional[LogitsProcessorList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + sequential: Optional[bool] = None, + **model_kwargs, + ) -> Union[ContrastiveSearchOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **contrastive search** and can + be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin.contrastive_search`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + top_k (`int`, *optional*, defaults to 1): + The size of the candidate set that is used to re-rank for contrastive search + penalty_alpha (`float`, *optional*, defaults to 0): + The degeneration penalty for contrastive search; activate when it is larger than 0 + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. 
+ output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + sequential (`bool`, *optional*): + Switches topk hidden state computation from parallel to sequential to reduce memory if True. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `forward` function of the model. + If model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.ContrastiveSearchDecoderOnlyOutput`], [`~generation.ContrastiveSearchEncoderDecoderOutput`] + or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.ContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.ContrastiveSearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") + >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") + >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token + >>> model.config.pad_token_id = model.config.eos_token_id + >>> input_prompt = "DeepMind Company is" + >>> input_ids = tokenizer(input_prompt, return_tensors="pt") + >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)]) + >>> outputs = model.contrastive_search( + ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria + ... ) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). 
DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + sequential = sequential if sequential is not None else self.generation_config.low_memory + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + + this_peer_finished = False # used by synced_gpus only + batch_size = input_ids.shape[0] + + while True: + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. + # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? 
the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; + # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step + if model_kwargs.get("past_key_values") is None: + # prepare inputs + model_kwargs["use_cache"] = True + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save + # the `encoder_outputs` + outputs = self( + **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions + ) + + # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with + # previous tokens) + if self.config.is_encoder_decoder: + last_hidden_states = outputs.decoder_hidden_states[-1] + else: + last_hidden_states = outputs.hidden_states[-1] + + # next logit for contrastive search to select top-k candidate tokens + logit_for_next_step = outputs.logits[:, -1, :] + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + standardize_cache_format=True, + ) + if not sequential: + # Expands model inputs top_k times, for batched forward passes (akin to beam search). + _, model_kwargs = self._expand_inputs_for_generation( + expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs + ) + + past_key_values = model_kwargs.get("past_key_values") + if past_key_values is None: + raise ValueError( + f"{self.__class__.__name__} does not support caching and therefore **can't** be used " + "for contrastive search." + ) + elif ( + not isinstance(past_key_values[0], (tuple, torch.Tensor)) + or past_key_values[0][0].shape[0] != batch_size + ): + raise ValueError( + f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " + "used for contrastive search without further modifications." 
+ ) + + # contrastive_search main logic start: + # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by + # degeneration penalty + logit_for_next_step = logits_processor(input_ids, logit_for_next_step) + logit_for_next_step = logits_warper(input_ids, logit_for_next_step) + next_probs = nn.functional.softmax(logit_for_next_step, dim=-1) + top_k_probs, top_k_ids = torch.topk(next_probs, dim=-1, k=top_k) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (logit_for_next_step,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # Replicates the new past_key_values to match the `top_k` candidates + new_key_values = [] + for layer in model_kwargs["past_key_values"]: + items = [] + # item is either the key or the value matrix + for item in layer: + if sequential: + items.append(item.repeat_interleave(1, dim=0)) + else: + items.append(item.repeat_interleave(top_k, dim=0)) + new_key_values.append(tuple(items)) + model_kwargs["past_key_values"] = tuple(new_key_values) + + if sequential: + all_outputs = {key: [] for key in outputs} # defined in first loop iteration + all_last_hstates, all_hstates, all_logits = [], [], [] + for i in range(top_k): + # compute the candidate tokens by the language model and collect their hidden_states + next_model_inputs = self.prepare_inputs_for_generation(top_k_ids[:, i].view(-1, 1), **model_kwargs) + + outputs = self( + **next_model_inputs, + return_dict=True, + output_hidden_states=True, + output_attentions=output_attentions, + ) + for key in all_outputs: + all_outputs[key].append(outputs[key]) + + if self.config.is_encoder_decoder: + next_hidden = outputs.decoder_hidden_states[-1] + full_hidden_states = outputs.decoder_hidden_states + + else: + next_hidden = outputs.hidden_states[-1] + full_hidden_states = outputs.hidden_states + + all_last_hstates.append(torch.squeeze(next_hidden, 0)) + all_hstates.append(full_hidden_states) + all_logits.append(outputs.logits[:, -1, :]) + + # stack hidden states + next_hidden = torch.stack([all_last_hstates[i] for i in range(top_k)], dim=0) + final_full_hstates = [0 for i in range(len(full_hidden_states))] + for layer in range(len(full_hidden_states)): + final_full_hstates[layer] = torch.stack( + [torch.squeeze(all_hstates[i][layer], 0) for i in range(top_k)], dim=0 + ) + full_hidden_states = tuple(final_full_hstates) + + # stack logits + logits = torch.cat(all_logits, dim=0) + + else: + # compute the candidate tokens by the language model and collect their hidden_states + # assembles top_k_ids into batch of size k + next_model_inputs = self.prepare_inputs_for_generation(top_k_ids.view(-1, 1), **model_kwargs) + + outputs = self( + **next_model_inputs, + return_dict=True, + output_hidden_states=True, + output_attentions=output_attentions, + ) + # name is different for encoder-decoder and decoder-only models + if self.config.is_encoder_decoder: + next_hidden = outputs.decoder_hidden_states[-1] + full_hidden_states = outputs.decoder_hidden_states + else: + next_hidden = outputs.hidden_states[-1] + full_hidden_states = outputs.hidden_states + + logits = 
outputs.logits[:, -1, :] + + context_hidden = last_hidden_states.repeat_interleave(top_k, dim=0) + + # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the + # model confidence. Keeping `selected_idx` on CPU enables multi-device contrastive search and doesn't + # introduce (noticeable) slowdowns on single-device runs. + selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) + selected_idx = selected_idx.to("cpu") + + # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing + # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores + # (model confidence minus degeneration penalty); (6) decoder hidden_states + next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx] + next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k)) + next_hidden = next_hidden[range(batch_size), selected_idx, :] + last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1) + + next_decoder_hidden_states = () + for layer in full_hidden_states: + layer = torch.stack(torch.split(layer, top_k))[range(batch_size), selected_idx, :] + next_decoder_hidden_states += (layer,) + + # generate past_key_values cache of only the selected token + if sequential: + next_model_input = self.prepare_inputs_for_generation( + top_k_ids[:, selected_idx].view(-1, 1), **model_kwargs + ) + + selected_outputs = self( + **next_model_input, + return_dict=True, + output_hidden_states=False, + output_attentions=False, + ) + next_past_key_values = selected_outputs["past_key_values"] + + else: + next_past_key_values = self._extract_past_from_model_output(outputs, standardize_cache_format=True) + new_key_values = () + for layer in next_past_key_values: + items = () + # item is either the key or the value matrix + for item in layer: + item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz] + item = item[range(batch_size), selected_idx, ...] # [B, num_head, seq_len, esz] + items += (item,) + new_key_values += (items,) + next_past_key_values = new_key_values + + logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(batch_size), selected_idx, :] + + # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration + if self.config.is_encoder_decoder: + next_step_cross_attentions = () + next_step_decoder_attentions = () + if output_attentions: + for layer in outputs.cross_attentions: + layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] + next_step_cross_attentions += (layer,) + for layer in outputs.decoder_attentions: + layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] + next_step_decoder_attentions += (layer,) + outputs = Seq2SeqLMOutput( + past_key_values=next_past_key_values, + decoder_hidden_states=next_decoder_hidden_states, + decoder_attentions=next_step_decoder_attentions or None, + cross_attentions=next_step_cross_attentions or None, + ) + else: + next_step_attentions = () + if output_attentions: + for layer in outputs.attentions: + layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] 
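+ # `layer` is regrouped from [batch * top_k, ...] to [batch, top_k, ...]; indexing with
+ # `selected_idx` keeps only the attention tensors of the chosen candidate per batch item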
+ next_step_attentions += (layer,) + outputs = CausalLMOutputWithPast( + past_key_values=next_past_key_values, + hidden_states=next_decoder_hidden_states, + attentions=next_step_attentions or None, + ) + # contrastive_search main logic end + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + # finished sentences should have their next token be a padding token + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # if eos_token was found in one sentence, set sentence to finished + if eos_token_id_tensor is not None: + unfinished_sequences = unfinished_sequences.mul( + next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0) + ) + + # stop when each sentence is finished + if unfinished_sequences.max() == 0: + this_peer_finished = True + + # stop if we exceed the maximum length + if stopping_criteria(input_ids, scores): + this_peer_finished = True + + if this_peer_finished and not synced_gpus: + break + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + # Contrastive search works by forward looking at the next token, so we need to exclude it from + # `past_key_values` to be consistent with the other decoding methods + if model_kwargs.get("past_key_values") is not None: + past_key_values = [] + for layer in model_kwargs["past_key_values"]: + layer_past_key_values = [] + for item in layer: + layer_past_key_values.append(item[..., :-1, :]) + past_key_values.append(tuple(layer_past_key_values)) + model_kwargs["past_key_values"] = tuple(past_key_values) + + if self.config.is_encoder_decoder: + return ContrastiveSearchEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return ContrastiveSearchDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + def greedy_search( + self, + input_ids: torch.LongTensor, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + **model_kwargs, + ) -> Union[GreedySearchOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be + used for 
text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate() + instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `forward` function of the model. + If model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... 
) + + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + + >>> input_prompt = "It might be possible to" + >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... ) + >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) + + >>> outputs = model.greedy_search( + ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria + ... ) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ["It might be possible to get a better understanding of the nature of the problem, but it's not"] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + + this_peer_finished = False # used by synced_gpus only + while True: + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
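+ # (under DeepSpeed ZeRO stage 3 the weights are sharded across ranks and gathered for every
+ # forward pass, so each rank must keep calling `forward` until all ranks have finished)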
+ # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + # prepare model inputs + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_tokens_scores = logits_processor(input_ids, next_token_logits) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_tokens_scores,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # argmax + next_tokens = torch.argmax(next_tokens_scores, dim=-1) + + # finished sentences should have their next token be a padding token + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # if eos_token was found in one sentence, set sentence to finished + if eos_token_id_tensor is not None: + unfinished_sequences = unfinished_sequences.mul( + next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0) + ) + + # stop when each sentence is finished + if unfinished_sequences.max() == 0: + this_peer_finished = True + + # stop if we exceed the maximum length + if stopping_criteria(input_ids, scores): + this_peer_finished = True + + if this_peer_finished and not synced_gpus: + break + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GreedySearchEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GreedySearchDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + def sample( + self, + input_ids: torch.LongTensor, + 
logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + **model_kwargs, + ) -> Union[SampleOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and + can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin.sample`] directly. Use generate() instead. + For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. 
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.SampleDecoderOnlyOutput`], [`~generation.SampleEncoderDecoderOutput`] or `torch.LongTensor`:
+ A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.SampleEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForCausalLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... TopKLogitsWarper,
+ ... TemperatureLogitsWarper,
+ ... StoppingCriteriaList,
+ ... MaxLengthCriteria,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> model.generation_config.pad_token_id = model.config.eos_token_id
+
+ >>> input_prompt = "Today is a beautiful day, and"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> # instantiate logits warpers
+ >>> logits_warper = LogitsProcessorList(
+ ... [
+ ... TopKLogitsWarper(50),
+ ... TemperatureLogitsWarper(0.7),
+ ... ]
+ ... )
+
+ >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
+
+ >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
+ >>> outputs = model.sample(
+ ... input_ids,
+ ... logits_processor=logits_processor,
+ ... logits_warper=logits_warper,
+ ... stopping_criteria=stopping_criteria,
+ ... 
) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Today is a beautiful day, and we must do everything possible to make it a day of celebration.'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + + this_peer_finished = False # used by synced_gpus only + # auto-regressive generation + while True: + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. + # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? 
the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + # prepare model inputs + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + next_token_scores = logits_warper(input_ids, next_token_scores) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # sample + probs = nn.functional.softmax(next_token_scores, dim=-1) + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + + # finished sentences should have their next token be a padding token + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # if eos_token was found in one sentence, set sentence to finished + if eos_token_id_tensor is not None: + unfinished_sequences = unfinished_sequences.mul( + next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0) + ) + + # stop when each sentence is finished + if unfinished_sequences.max() == 0: + this_peer_finished = True + + # stop if we exceed the maximum length + if stopping_criteria(input_ids, scores): + this_peer_finished = True + + if this_peer_finished and not synced_gpus: + break + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return SampleEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return SampleDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + def _temporary_reorder_cache(self, past_key_values, beam_idx): + """ + Temporary function to handle the different types of cache reordering processes while we roll out `Cache`. + + TODO: standardize cache formats and make all models compatible with `Cache`. 
It would remove the need
+ for this function, with `Cache.reorder_cache` being the sole remaining code path
+ """
+ model_class = self.__class__.__name__.lower()
+ # Exception 1: code path for models using the legacy cache format
+ if isinstance(past_key_values, (tuple, list)):
+ past_key_values = self._reorder_cache(past_key_values, beam_idx)
+ # Exception 2: models with different cache formats. These are limited to `DynamicCache` until their
+ # cache format is standardized, to avoid adding complexity to the codebase.
+ elif "bloom" in model_class or "gptbigcode" in model_class:
+ if not isinstance(past_key_values, DynamicCache):
+ raise ValueError(
+ f"Using an unsupported cache format with {model_class}. Currently, it only supports the "
+ "legacy tuple format or `DynamicCache`"
+ )
+ past_key_values = self._reorder_cache(past_key_values, beam_idx)
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ # Standard code path: use the `Cache.reorder_cache`
+ else:
+ past_key_values.reorder_cache(beam_idx)
+ return past_key_values
+
+ def beam_search(
+ self,
+ input_ids: torch.LongTensor,
+ beam_scorer: BeamScorer,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ **model_kwargs,
+ ) -> Union[BeamSearchOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **beam search decoding** and
+ can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin.beam_search`] directly. Use generate()
+ instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ beam_scorer (`BeamScorer`):
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
+ sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. 
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForSeq2SeqLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... BeamSearchScorer,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+
+ >>> encoder_input_str = "translate English to German: How old are you?"
+ >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+
+ >>> # let's run beam search using 3 beams
+ >>> num_beams = 3
+ >>> # define decoder start token ids
+ >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+ >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+ >>> # add encoder_outputs to model keyword arguments
+ >>> model_kwargs = {
+ ... "encoder_outputs": model.get_encoder()(
+ ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+ ... )
+ ... }
+
+ >>> # instantiate beam scorer
+ >>> beam_scorer = BeamSearchScorer(
+ ... batch_size=1,
+ ... num_beams=num_beams,
+ ... device=model.device,
+ ... )
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
+ ... ]
+ ... 
)
+
+ >>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt bist du?']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ if len(stopping_criteria) == 0:
+ warnings.warn("You have not defined any stopping_criteria; generation will likely loop forever", UserWarning)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ batch_size = len(beam_scorer._beam_hyps)
+ num_beams = beam_scorer.num_beams
+
+ batch_beam_size, cur_len = input_ids.shape
+
+ if num_beams * batch_size != batch_beam_size:
+ raise ValueError(
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ beam_indices = (
+ tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
+ )
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
+ # of the first beam are considered to avoid sampling the exact same tokens across all beams.
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+ beam_scores[:, 1:] = -1e9
+ beam_scores = beam_scores.view((batch_size * num_beams,))
+
+ this_peer_finished = False # used by synced_gpus only
+
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
+ while True:
+ if synced_gpus:
+ # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
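+ # (a rank that simply broke out of the loop would leave its peers blocked in the collective
+ # `all_reduce` below, so termination has to be agreed on by all ranks)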
+ # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * num_beams, vocab_size) + + next_token_scores_processed = logits_processor(input_ids, next_token_scores) + next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as( + next_token_scores_processed + ) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores_processed,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # reshape for beam search + vocab_size = next_token_scores.shape[-1] + next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + + # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam. 
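+ # e.g. with num_beams=3 and one eos token this keeps max(2, 1 + 1) * 3 = 6 candidates, so even
+ # if eos is among the selected continuations for every beam, 3 non-eos candidates survive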
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0 + next_token_scores, next_tokens = torch.topk( + next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True + ) + + next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") + next_tokens = next_tokens % vocab_size + + # stateless + beam_outputs = beam_scorer.process( + input_ids, + next_token_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + beam_scores = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + if model_kwargs["past_key_values"] is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], beam_idx + ) + + if return_dict_in_generate and output_scores: + beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) + + # increase cur_len + cur_len = cur_len + 1 + + if beam_scorer.is_done or stopping_criteria(input_ids, scores): + if not synced_gpus: + break + else: + this_peer_finished = True + + sequence_outputs = beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + + if self.config.is_encoder_decoder: + return BeamSearchEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return BeamSearchDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def beam_sample( + self, + input_ids: torch.LongTensor, + beam_scorer: BeamScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + **model_kwargs, + ) -> Union[BeamSampleOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **beam search multinomial + sampling** and can be used for 
text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin.beam_sample`] directly. Use generate() + instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + beam_scorer (`BeamScorer`): + A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + model_kwargs: + Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is + an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.BeamSampleDecoderOnlyOutput`], [`~generation.BeamSampleEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.BeamSampleEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForSeq2SeqLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... 
TopKLogitsWarper, + ... TemperatureLogitsWarper, + ... BeamSearchScorer, + ... ) + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids + + >>> # lets run beam search using 3 beams + >>> num_beams = 3 + >>> # define decoder start token ids + >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) + >>> input_ids = input_ids * model.config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> model_kwargs = { + ... "encoder_outputs": model.get_encoder()( + ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True + ... ) + ... } + + >>> # instantiate beam scorer + >>> beam_scorer = BeamSearchScorer( + ... batch_size=1, + ... max_length=model.config.max_length, + ... num_beams=num_beams, + ... device=model.device, + ... ) + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] + ... ) + >>> # instantiate logits processors + >>> logits_warper = LogitsProcessorList( + ... [ + ... TopKLogitsWarper(50), + ... TemperatureLogitsWarper(0.7), + ... ] + ... ) + + >>> outputs = model.beam_sample( + ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs + ... ) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt bist du?'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + batch_size = len(beam_scorer._beam_hyps) + num_beams = beam_scorer.num_beams + + batch_beam_size, cur_len = input_ids.shape + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + beam_indices = ( + tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None + ) + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate 
and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) + beam_scores = beam_scores.view((batch_size * num_beams,)) + + this_peer_finished = False # used by synced_gpus only + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder + while True: + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. + # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * num_beams, vocab_size) + + next_token_scores_processed = logits_processor(input_ids, next_token_scores) + next_token_scores_processed = logits_warper(input_ids, next_token_scores_processed) + next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as( + next_token_scores_processed + ) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores_processed,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # reshape for beam search + vocab_size = next_token_scores.shape[-1] + next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + + probs = nn.functional.softmax(next_token_scores, dim=-1) + + next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) + next_token_scores = torch.gather(next_token_scores, -1, next_tokens) + + next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) + next_tokens = torch.gather(next_tokens, -1, _indices) + + next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") + next_tokens = next_tokens % vocab_size + + # stateless + beam_outputs = beam_scorer.process( + input_ids, + next_token_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + beam_scores = beam_outputs["next_beam_scores"] + 
beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + if model_kwargs["past_key_values"] is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], beam_idx + ) + + if return_dict_in_generate and output_scores: + beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) + + # increase cur_len + cur_len = cur_len + 1 + + if beam_scorer.is_done or stopping_criteria(input_ids, scores): + if not synced_gpus: + break + else: + this_peer_finished = True + + sequence_outputs = beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + + if self.config.is_encoder_decoder: + return BeamSampleEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return BeamSampleDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def group_beam_search( + self, + input_ids: torch.LongTensor, + beam_scorer: BeamScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + **model_kwargs, + ): + r""" + Generates sequences of token ids for models with a language modeling head using **diverse beam search + decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin.group_beam_search`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + beam_scorer (`BeamScorer`): + An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. 
+            logits_processor (`LogitsProcessorList`, *optional*):
+                An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+                used to modify the prediction scores of the language modeling head applied at each generation step.
+            stopping_criteria (`StoppingCriteriaList`, *optional*):
+                An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+                used to tell if the generation loop should stop.
+            max_length (`int`, *optional*, defaults to 20):
+                **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+                tokens. The maximum length of the sequence to be generated.
+            pad_token_id (`int`, *optional*):
+                The id of the *padding* token.
+            eos_token_id (`Union[int, List[int]]`, *optional*):
+                The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+            output_attentions (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more details.
+            output_hidden_states (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more details.
+            output_scores (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+            synced_gpus (`bool`, *optional*, defaults to `False`):
+                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+
+            model_kwargs:
+                Additional model specific kwargs that will be forwarded to the `forward` function of the model. If
+                model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+        Return:
+            [`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or
+            `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+            [`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+            `return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if
+            `model.config.is_encoder_decoder=True`.
+
+        Examples:
+
+        ```python
+        >>> from transformers import (
+        ...     AutoTokenizer,
+        ...     AutoModelForSeq2SeqLM,
+        ...     LogitsProcessorList,
+        ...     MinLengthLogitsProcessor,
+        ...     HammingDiversityLogitsProcessor,
+        ...     BeamSearchScorer,
+        ... )
+        >>> import torch
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
+        >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+
+        >>> encoder_input_str = "translate English to German: How old are you?"
+        >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+
+        >>> # lets run diverse beam search using 6 beams
+        >>> num_beams = 6
+        >>> # define decoder start token ids
+        >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+        >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+        >>> # add encoder_outputs to model keyword arguments
+        >>> model_kwargs = {
+        ...     "encoder_outputs": model.get_encoder()(
+        ...         encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+        ...     )
+        ... }
+
+        >>> # instantiate beam scorer
+        >>> beam_scorer = BeamSearchScorer(
+        ...
batch_size=1, + ... max_length=model.config.max_length, + ... num_beams=num_beams, + ... device=model.device, + ... num_beam_groups=3, + ... ) + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), + ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), + ... ] + ... ) + + >>> outputs = model.group_beam_search( + ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs + ... ) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt bist du?'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + num_beams = beam_scorer.num_beams + num_beam_groups = beam_scorer.num_beam_groups + num_sub_beams = num_beams // num_beam_groups + batch_size = len(beam_scorer._beam_hyps) // num_beam_groups + device = input_ids.device + + batch_beam_size, cur_len = input_ids.shape + + if return_dict_in_generate and output_scores: + beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)] + else: + beam_indices = None + + if num_beams * batch_size != batch_beam_size: + raise ValueError( + f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in + # the same group don't produce same tokens everytime. 
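+        # Worked example (editor's sketch, not upstream code): with num_beams = 6 and
+        # num_beam_groups = 3, num_sub_beams = 2, so the assignment below yields, per batch entry,
+        # [0, -1e9, 0, -1e9, 0, -1e9]: exactly one "live" beam per group on the first step,
+        # mirroring the single-group initialisation used in `beam_search`:
+        #   >>> s = torch.full((1, 6), -1e9)
+        #   >>> s[:, ::2] = 0  # tensor([[0., -1e9, 0., -1e9, 0., -1e9]])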
+ beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) + beam_scores[:, ::num_sub_beams] = 0 + beam_scores = beam_scores.view((batch_size * num_beams,)) + + this_peer_finished = False # used by synced_gpus only + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder + while True: + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. + # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + # predicted tokens in cur_len step + current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) + + # indices which will form the beams in the next time step + reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) + + # do one decoder step on all beams of all sentences in batch + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + if output_scores: + processed_score = torch.zeros_like(outputs.logits[:, -1, :]) + + for beam_group_idx in range(num_beam_groups): + group_start_idx = beam_group_idx * num_sub_beams + group_end_idx = min(group_start_idx + num_sub_beams, num_beams) + group_size = group_end_idx - group_start_idx + + # indices of beams of current group among all sentences in batch + batch_group_indices = [] + + for batch_idx in range(batch_size): + batch_group_indices.extend( + [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] + ) + group_input_ids = input_ids[batch_group_indices] + + # select outputs of beams of current group only + next_token_logits = outputs.logits[batch_group_indices, -1, :] + + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * group_size, vocab_size) + vocab_size = next_token_scores.shape[-1] + + next_token_scores_processed = logits_processor( + group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx + ) + next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1) + next_token_scores = next_token_scores.expand_as(next_token_scores_processed) + + if output_scores: + processed_score[batch_group_indices] = next_token_scores_processed + + # reshape for beam search + next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) + + # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam. 
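+                # Editor's note (illustrative): the same over-sampling trick as in `beam_search`, but
+                # applied per group: with group_size = 2 and one eos id, max(2, 1 + 1) * group_size = 4
+                # candidates are drawn from this group's (batch_size, group_size * vocab_size) scores,
+                # guaranteeing at least `group_size` non-eos continuations for the group.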
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0 + next_token_scores, next_tokens = torch.topk( + next_token_scores, max(2, 1 + n_eos_tokens) * group_size, dim=1, largest=True, sorted=True + ) + + next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") + next_tokens = next_tokens % vocab_size + + # stateless + process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None + beam_outputs = beam_scorer.process( + group_input_ids, + next_token_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=process_beam_indices, + group_index=beam_group_idx, + decoder_prompt_len=decoder_prompt_len, + ) + beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + if return_dict_in_generate and output_scores: + beam_indices[beam_group_idx] = tuple( + beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0])) + ) + + input_ids[batch_group_indices] = group_input_ids[beam_idx] + group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + current_tokens[batch_group_indices] = group_input_ids[:, -1] + + # (beam_idx // group_size) -> batch_idx + # (beam_idx % group_size) -> offset of idx inside the group + reordering_indices[batch_group_indices] = ( + num_beams * torch.div(beam_idx, group_size, rounding_mode="floor") + + group_start_idx + + (beam_idx % group_size) + ) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (processed_score,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + if model_kwargs["past_key_values"] is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], reordering_indices + ) + + # increase cur_len + cur_len = cur_len + 1 + + if beam_scorer.is_done or stopping_criteria(input_ids, scores): + if not synced_gpus: + break + else: + this_peer_finished = True + + final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None + sequence_outputs = beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=final_beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + + if self.config.is_encoder_decoder: + return BeamSearchEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + 
cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return BeamSearchDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def constrained_beam_search( + self, + input_ids: torch.LongTensor, + constrained_beam_scorer: ConstrainedBeamSearchScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: Optional[bool] = None, + **model_kwargs, + ) -> Union[BeamSearchOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **constrained beam search + decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin.constrained_beam_search`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + constrained_beam_scorer (`ConstrainedBeamSearchScorer`): + A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation, while satisfying a list of positive constraints. For more information, the + documentation of [`ConstrainedBeamSearchScorer`] should be read. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. 
+            output_hidden_states (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more details.
+            output_scores (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+            synced_gpus (`bool`, *optional*, defaults to `False`):
+                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+            model_kwargs:
+                Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+                an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+        Return:
+            [`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or
+            `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+            [`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+            `return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if
+            `model.config.is_encoder_decoder=True`.
+
+
+        Examples:
+
+        ```python
+        >>> from transformers import (
+        ...     AutoTokenizer,
+        ...     AutoModelForSeq2SeqLM,
+        ...     LogitsProcessorList,
+        ...     MinLengthLogitsProcessor,
+        ...     ConstrainedBeamSearchScorer,
+        ...     PhrasalConstraint,
+        ... )
+        >>> import torch
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
+        >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+
+        >>> encoder_input_str = "translate English to German: How old are you?"
+        >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+
+        >>> # lets run beam search using 3 beams
+        >>> num_beams = 3
+        >>> # define decoder start token ids
+        >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+        >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+        >>> # add encoder_outputs to model keyword arguments
+        >>> model_kwargs = {
+        ...     "encoder_outputs": model.get_encoder()(
+        ...         encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+        ...     )
+        ... }
+
+        >>> constraint_str = "Sie"
+        >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1]  # slice to remove eos token
+        >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)]
+
+
+        >>> # instantiate beam scorer
+        >>> beam_scorer = ConstrainedBeamSearchScorer(
+        ...     batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints
+        ... )
+
+        >>> # instantiate logits processors
+        >>> logits_processor = LogitsProcessorList(
+        ...     [
+        ...         MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
+        ...     ]
+        ... )
+
+        >>> outputs = model.constrained_beam_search(
+        ...     input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs
+        ... )
+
+        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+        ['Wie alt sind Sie?']
+        ```"""
+        # init values
+        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+        if max_length is not None:
+            warnings.warn(
+                "`max_length` is deprecated in this function, use"
+                " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+                UserWarning,
+            )
+            stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+        if len(stopping_criteria) == 0:
+            warnings.warn("You have not defined any stopping_criteria; this will likely loop forever", UserWarning)
+        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+        output_attentions = (
+            output_attentions if output_attentions is not None else self.generation_config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+        )
+        return_dict_in_generate = (
+            return_dict_in_generate
+            if return_dict_in_generate is not None
+            else self.generation_config.return_dict_in_generate
+        )
+
+        batch_size = len(constrained_beam_scorer._beam_hyps)
+        num_beams = constrained_beam_scorer.num_beams
+
+        batch_beam_size, cur_len = input_ids.shape
+
+        if num_beams * batch_size != batch_beam_size:
+            raise ValueError(
+                f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+            )
+
+        # init attention / hidden states / scores tuples
+        scores = () if (return_dict_in_generate and output_scores) else None
+        beam_indices = (
+            tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
+        )
+        decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+        cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+        decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+        # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+        if return_dict_in_generate and self.config.is_encoder_decoder:
+            encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+            encoder_hidden_states = (
+                model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+            )
+
+        # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
+        # of the first beam are considered to avoid sampling the exact same tokens across all beams.
+        beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+        beam_scores[:, 1:] = -1e9
+        beam_scores = beam_scores.view((batch_size * num_beams,))
+
+        this_peer_finished = False  # used by synced_gpus only
+
+        decoder_prompt_len = input_ids.shape[-1]  # record the prompt length of decoder
+        while True:
+            if synced_gpus:
+                # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
+ # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * num_beams, vocab_size) + + next_token_scores_processed = logits_processor(input_ids, next_token_scores) + + next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as( + next_token_scores_processed + ) + + scores_for_all_vocab = next_token_scores.clone() + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # reshape for beam search + vocab_size = next_token_scores.shape[-1] + next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + + # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam. 
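+            # Editor's note (illustrative): unlike plain beam search, the scorer below also receives
+            # `scores_for_all_vocab` (cloned above), so `constrained_beam_scorer.process` can advance
+            # beams with forced constraint tokens (e.g. the "Sie" tokens from the docstring example)
+            # even when those tokens did not make the top-k cut.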
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0 + next_token_scores, next_tokens = torch.topk( + next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True + ) + + next_indices = (next_tokens / vocab_size).long() + next_tokens = next_tokens % vocab_size + + # stateless + beam_outputs = constrained_beam_scorer.process( + input_ids, + next_token_scores, + next_tokens, + next_indices, + scores_for_all_vocab, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + beam_scores = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + if model_kwargs["past_key_values"] is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], beam_idx + ) + + if return_dict_in_generate and output_scores: + beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) + + # increase cur_len + cur_len = cur_len + 1 + + if constrained_beam_scorer.is_done or stopping_criteria(input_ids, scores): + if not synced_gpus: + break + else: + this_peer_finished = True + + sequence_outputs = constrained_beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + if self.config.is_encoder_decoder: + return BeamSearchEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return BeamSearchDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def assisted_decoding( + self, + input_ids: torch.LongTensor, + assistant_model: "PreTrainedModel", + do_sample: bool = False, + logits_processor: Optional[LogitsProcessorList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + **model_kwargs, + ): + r""" + Generates sequences of token ids for models with a language modeling head using **greedy decoding** 
or
+        **sample** (depending on `do_sample`), assisted by a smaller model. Can be used for text-decoder, text-to-text,
+        speech-to-text, and vision-to-text models.
+
+
+
+        In most cases, you do not need to call [`~generation.GenerationMixin.assisted_decoding`] directly. Use
+        generate() instead. For an overview of generation strategies and code examples, check the [following
+        guide](../generation_strategies).
+
+
+
+        Parameters:
+            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+                The sequence used as a prompt for the generation.
+            assistant_model (`PreTrainedModel`, *optional*):
+                An assistant model that can be used to accelerate generation. The assistant model must have the exact
+                same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model
+                is much faster than running generation with the model you're calling generate from. As such, the
+                assistant model should be much smaller.
+            do_sample (`bool`, *optional*, defaults to `False`):
+                Whether or not to use sampling; use greedy decoding otherwise.
+            logits_processor (`LogitsProcessorList`, *optional*):
+                An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+                used to modify the prediction scores of the language modeling head applied at each generation step.
+            logits_warper (`LogitsProcessorList`, *optional*):
+                An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
+                to warp the prediction score distribution of the language modeling head applied before multinomial
+                sampling at each generation step.
+            stopping_criteria (`StoppingCriteriaList`, *optional*):
+                An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+                used to tell if the generation loop should stop.
+            pad_token_id (`int`, *optional*):
+                The id of the *padding* token.
+            eos_token_id (`Union[int, List[int]]`, *optional*):
+                The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+            output_attentions (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more details.
+            output_hidden_states (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more details.
+            output_scores (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+            synced_gpus (`bool`, *optional*, defaults to `False`):
+                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+            streamer (`BaseStreamer`, *optional*):
+                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+            model_kwargs:
+                Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
+                If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+ + Return: + [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("gpt2") + >>> assistant_model = AutoModelForCausalLM.from_pretrained("distilgpt2") + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + >>> input_prompt = "It might be possible to" + >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... ) + >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) + >>> outputs = model.assisted_decoding( + ... input_ids, + ... assistant_model=assistant_model, + ... logits_processor=logits_processor, + ... stopping_criteria=stopping_criteria, + ... ) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ["It might be possible to get a better understanding of the nature of the problem, but it's not"] + ```""" + # Assistant: initialize assistant-related variables + if hasattr(assistant_model, "num_assistant_tokens"): + warnings.warn( + "Setting `num_assistant_tokens` via `assistant_model.num_assistant_tokens` is deprecated and will be removed in v.37. 
Make sure to set `num_assistant_tokens` via the generation_config instead.", + FutureWarning, + ) + num_assistant_tokens = assistant_model.num_assistant_tokens + else: + num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens + + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if eos_token_id is not None and pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # prepare assistant model's keys of inputs + assistant_kwargs = copy.copy(model_kwargs) + if assistant_model.config.is_encoder_decoder: + # both are encoder-decoder + input_ids_key = "decoder_input_ids" + attention_key = "decoder_attention_mask" + assistant_kwargs["encoder_outputs"] = assistant_kwargs.pop("assistant_encoder_outputs") + elif "assistant_encoder_outputs" in assistant_kwargs: + # special case for encoder-decoder with decoder-only assistant (like DistilWhisper) + input_ids_key = "input_ids" + attention_key = "attention_mask" + assistant_kwargs["attention_mask"] = assistant_kwargs.get( + "decoder_attention_mask", + torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long), + ) + assistant_kwargs["encoder_outputs"] = assistant_kwargs.pop("assistant_encoder_outputs") + else: + # both are decoder-only + input_ids_key = "input_ids" + attention_key = "attention_mask" + + # keep track of which sequences are already finished + unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) + + # other auxiliary variables + max_len = stopping_criteria[0].max_length + + this_peer_finished = False # used by synced_gpus only + while 
True: + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. + # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + break + + # Assistant: main logic start + cur_len = input_ids.shape[-1] + + # 1. Forecast next N tokens using the assistant model. This `for` block can be replaced with a + # `.generate()` call if we decide to add `past_key_values` as a possible output of generate, as we + # need access to the assistant cache to secure strong speedups. + candidate_input_ids = input_ids + for _ in range(int(num_assistant_tokens)): + # 1.1 prepare assistant model inputs + assistant_inputs = assistant_model.prepare_inputs_for_generation( + candidate_input_ids, + **assistant_kwargs, + ) + + # 1.2. check if the input ids length is correct + has_past_key_values = assistant_inputs.get("past_key_values", None) is not None + if has_past_key_values and assistant_inputs[input_ids_key].shape[-1] not in (1, 2): + raise ValueError("The length of the input ids in assistant inputs should be 1 or 2") + + # 1.3. use the assistant model to obtain the next candidate logits + assistant_model_outputs = assistant_model(**assistant_inputs) + + # 1.4. greedily select the next candidate token + if len(logits_processor) > 0: + assistant_model_outputs.logits[:, -1, :] = logits_processor( + candidate_input_ids, assistant_model_outputs.logits[:, -1, :] + ) + new_token = assistant_model_outputs.logits[:, -1, :].argmax(dim=-1) + candidate_input_ids = torch.cat((candidate_input_ids, new_token[:, None]), dim=-1) + + # 1.5. update assistant model inputs + if assistant_kwargs.get(attention_key, None) is not None: + mask = assistant_kwargs[attention_key] + assistant_kwargs[attention_key] = torch.cat([mask, mask.new_ones((mask.shape[0], 1))], dim=-1) + assistant_kwargs["past_key_values"] = assistant_model_outputs.past_key_values + + # 1.6. stop assistant generation on EOS + if eos_token_id_tensor is not None: + last_assistant_token_is_eos = new_token.tile(eos_token_id_tensor.shape[0], 1) + last_assistant_token_is_eos = ( + ~last_assistant_token_is_eos.ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0).bool() + ) + if last_assistant_token_is_eos: + break + else: + last_assistant_token_is_eos = False + + candidate_length = candidate_input_ids.shape[1] - input_ids.shape[1] + + # 2. Use the original model to obtain the next token logits given the candidate sequence. We obtain + # `candidate_length + 1` relevant logits from this process: in the event that all candidates are correct, + # we use this forward pass to also pick the subsequent logits in the original model. + + # 2.1. Prepare the model inputs + candidate_kwargs = copy.copy(model_kwargs) + candidate_kwargs = _prepare_attention_mask( + candidate_kwargs, candidate_input_ids.shape[1], self.config.is_encoder_decoder + ) + candidate_kwargs = _prepare_token_type_ids(candidate_kwargs, candidate_input_ids.shape[1]) + + model_inputs = self.prepare_inputs_for_generation(candidate_input_ids, **candidate_kwargs) + + # 2.2. 
Run a forward pass on the candidate sequence + outputs = self( + **model_inputs, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + # 2.3. Process the new logits + new_logits = outputs.logits[:, -candidate_length - 1 :] # excludes the input prompt if present + if len(logits_processor) > 0: + for i in range(candidate_length + 1): + new_logits[:, i, :] = logits_processor(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :]) + if len(logits_warper) > 0: + for i in range(candidate_length + 1): + new_logits[:, i, :] = logits_warper(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :]) + + # 3. Obtain the next tokens from the original model logits. + if do_sample: + probs = new_logits.softmax(dim=-1) + selected_tokens = torch.multinomial(probs[0, :, :], num_samples=1).squeeze(1)[None, :] + else: + selected_tokens = new_logits.argmax(dim=-1) + + # 4. Compare the argmax from the original model logits with the assistant forecasted tokens. We can keep + # the assistant forecasted tokens until the first mismatch, or until the max length is reached. + candidate_new_tokens = candidate_input_ids[:, -candidate_length:] + n_matches = ((~(candidate_new_tokens == selected_tokens[:, :-1])).cumsum(dim=-1) < 1).sum() + + # 5. Update variables according to the number of matching assistant tokens. Remember: the token generated + # by the model after the last candidate match is also valid, as it is generated from a correct sequence. + # Because of this last token, assisted generation search reduces to a normal greedy search/sample if there + # is no match. + + # 5.1. Ensure we don't generate beyond max_len or an EOS token + if last_assistant_token_is_eos and n_matches == candidate_length: + n_matches -= 1 + n_matches = min(n_matches, max_len - cur_len - 1) + + # 5.2. Get the valid continuation, after the matching tokens + valid_tokens = selected_tokens[:, : n_matches + 1] + input_ids = torch.cat((input_ids, valid_tokens), dim=-1) + if streamer is not None: + streamer.put(valid_tokens.cpu()) + new_cur_len = input_ids.shape[-1] + + # 5.3. Discard past key values relative to unused assistant tokens + new_cache_size = new_cur_len - 1 + outputs.past_key_values = _crop_past_key_values(self, outputs.past_key_values, new_cache_size) + assistant_kwargs["past_key_values"] = _crop_past_key_values( + assistant_model, assistant_kwargs["past_key_values"], new_cache_size - 1 + ) # the assistant does not have the token after the last match, hence the -1 + + # 6. Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic, + # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the + # cost of forecasting incorrect assistant tokens. + if assistant_model.generation_config.num_assistant_tokens_schedule == "heuristic": + if n_matches == int(num_assistant_tokens): + num_assistant_tokens += 2.0 + else: + num_assistant_tokens = max(1.0, num_assistant_tokens - 1.0) + + # Assistant: main logic end + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + # Store scores, attentions and hidden_states when required + # Assistant: modified to append one tuple element per token, as in the other generation methods. 
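+            # (Illustrative worked example, added for clarity: with candidate_length == 4 and the
+            # first mismatch at the third assistant token, n_matches == 2, so the tuples below each
+            # gain n_matches + 1 == 3 entries: the two accepted assistant tokens plus the token the
+            # main model itself produced at the mismatch position.)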
+ if return_dict_in_generate: + if output_scores: + scores += tuple(new_logits[:, i, :] for i in range(n_matches + 1)) + + if "past_key_values" not in model_kwargs: + added_len = new_cur_len + else: + added_len = n_matches + 1 + + if output_attentions: + if self.config.is_encoder_decoder: + cross_attentions = _split_model_outputs( + cross_attentions, outputs.cross_attentions, cur_len, added_len + ) + decoder_attentions = _split_model_outputs( + decoder_attentions, + outputs.decoder_attentions, + cur_len, + added_len, + is_decoder_attention=True, + ) + else: + decoder_attentions = _split_model_outputs( + decoder_attentions, + outputs.attentions, + cur_len, + added_len, + is_decoder_attention=True, + ) + if output_hidden_states: + if self.config.is_encoder_decoder: + decoder_hidden_states = _split_model_outputs( + decoder_hidden_states, outputs.decoder_hidden_states, cur_len, added_len + ) + else: + decoder_hidden_states = _split_model_outputs( + decoder_hidden_states, outputs.hidden_states, cur_len, added_len + ) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # Update assistant_kwargs for the assistant's next round of generations + assistant_kwargs = _prepare_attention_mask( + assistant_kwargs, new_cur_len, assistant_model.config.is_encoder_decoder + ) + assistant_kwargs = _prepare_token_type_ids(assistant_kwargs, new_cur_len) + + # if eos_token was found in one sentence, set sentence to finished + if eos_token_id_tensor is not None: + unfinished_sequences = unfinished_sequences.mul( + input_ids[:, -1] + .tile(eos_token_id_tensor.shape[0], 1) + .ne(eos_token_id_tensor.unsqueeze(1)) + .prod(dim=0) + ) + + # stop when each sentence is finished + if unfinished_sequences.max() == 0: + this_peer_finished = True + + # stop if we exceed the maximum length + if stopping_criteria(input_ids, scores): + this_peer_finished = True + + if this_peer_finished and not synced_gpus: + break + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GreedySearchEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GreedySearchDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + +def _crop_past_key_values(model, past_key_values, maximum_length): + """Crops the past key values up to a certain maximum length.""" + new_past = [] + if model.config.is_encoder_decoder: + for idx in range(len(past_key_values)): + new_past.append( + ( + past_key_values[idx][0][:, :, :maximum_length, :], + past_key_values[idx][1][:, :, :maximum_length, :], + past_key_values[idx][2], + past_key_values[idx][3], + ) + ) + past_key_values = tuple(new_past) + # bloom is special + elif "bloom" in model.__class__.__name__.lower() or ( + model.config.architectures is not None and "bloom" in model.config.architectures[0].lower() + ): + for idx in range(len(past_key_values)): + new_past.append( + ( + past_key_values[idx][0][:, :, :maximum_length], + past_key_values[idx][1][:, :maximum_length, :], + ) + ) + past_key_values = 
tuple(new_past)
+    # gptbigcode is too
+    elif "gptbigcode" in model.__class__.__name__.lower() or (
+        model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower()
+    ):
+        if model.config.multi_query:
+            for idx in range(len(past_key_values)):
+                past_key_values[idx] = past_key_values[idx][:, :maximum_length, :]
+        else:
+            for idx in range(len(past_key_values)):
+                past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :]
+    else:
+        for idx in range(len(past_key_values)):
+            new_past.append(
+                (
+                    past_key_values[idx][0][:, :, :maximum_length, :],
+                    past_key_values[idx][1][:, :, :maximum_length, :],
+                )
+            )
+        past_key_values = tuple(new_past)
+    return past_key_values
+
+
+def _split_model_outputs(outputs, new_outputs, cur_len, added_len, is_decoder_attention=False):
+    """
+    Given the (decoder/cross attentions)/(decoder hidden states) for multiple generated tokens, splits them into a
+    tuple where each member corresponds to a single generated token.
+    """
+    # Retrocompatibility: in our generation functions, the first iteration includes the attention/hidden states for the
+    # prompt.
+    if len(outputs) == 0:
+        new_tuple = ()
+        for layer in new_outputs:
+            last_dim_size = cur_len if is_decoder_attention else layer.shape[-1]
+            new_tuple += (layer[..., :cur_len, :last_dim_size],)
+        outputs += (new_tuple,)
+        # The first iteration contains the prompt + 1 generated token, let's update the length variables accordingly
+        cur_len += 1
+        added_len -= cur_len
+
+    for i in range(added_len):
+        new_tuple = ()
+        for layer in new_outputs:
+            last_dim_size = cur_len + i if is_decoder_attention else layer.shape[-1]
+            new_tuple += (layer[..., i : i + 1, :last_dim_size],)
+        outputs += (new_tuple,)
+    return outputs
+
+
+def top_k_top_p_filtering(
+    logits: torch.FloatTensor,
+    top_k: int = 0,
+    top_p: float = 1.0,
+    filter_value: float = -float("Inf"),
+    min_tokens_to_keep: int = 1,
+) -> torch.FloatTensor:
+    """
+    Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.
+
+    Args:
+        logits: logits distribution shape (batch size, vocabulary size)
+        top_k (`int`, *optional*, defaults to 0):
+            If > 0, only keep the top k tokens with highest probability (top-k filtering)
+        top_p (`float`, *optional*, defaults to 1.0):
+            If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
+            filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+        min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens we keep per batch example in the output.
+
+    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+    """
+    if top_k > 0:
+        logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
+            None, logits
+        )
+
+    if 0 <= top_p <= 1.0:
+        logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
+            None, logits
+        )
+
+    return logits
+
+
+def _ranking_fast(
+    context_hidden: torch.FloatTensor,
+    next_hidden: torch.FloatTensor,
+    next_top_k_probs: torch.FloatTensor,
+    alpha: float,
+    beam_width: int,
+) -> torch.FloatTensor:
+    """
+    Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
+    in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
+    row in the batch.
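+    (Gloss, added for clarity: each candidate is scored as
+    (1 - alpha) * model_confidence - alpha * degeneration_penalty, exactly as computed below, and
+    the argmax over the beam_width candidates of each batch row is returned.)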
+ """ + norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True) + norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True) + cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S] + degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K] + next_top_k_probs = next_top_k_probs.view(-1) # [B*K] + contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty + contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K] + _, selected_idx = contrastive_score.max(dim=-1) # [B] + return selected_idx + + +def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]: + """Expands or crops the model's mask for decoding purposes, to the defined length""" + + mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask" + if mask_key not in model_kwargs: + return model_kwargs + + mask = model_kwargs[mask_key] + mask_length_diff = new_length - mask.shape[1] + + if mask_length_diff < 0: + model_kwargs[mask_key] = mask[:, :mask_length_diff] + elif mask_length_diff > 0: + model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1) + return model_kwargs + + +def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]: + """Expands or crops the model's token_type_ids for decoding purposes, to the defined length""" + if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None: + return model_kwargs + + token_type_ids = model_kwargs["token_type_ids"] + final_token_type = token_type_ids[:, -1].unsqueeze(-1) + type_length_diff = new_length - token_type_ids.shape[1] + + if type_length_diff < 0: + token_type_ids = token_type_ids[:, :type_length_diff] + elif type_length_diff > 0: + token_type_copies = final_token_type.repeat(1, type_length_diff) + model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1) + return model_kwargs diff --git a/modified/integrations/__init__.py b/modified/integrations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1e41263eef70e5d813c822d4a94956a7389c11 --- /dev/null +++ b/modified/integrations/__init__.py @@ -0,0 +1,146 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ..utils import _LazyModule + + +_import_structure = { + "awq": ["fuse_awq_modules", "replace_with_awq_linear"], + "bitsandbytes": [ + "get_keys_to_not_convert", + "replace_8bit_linear", + "replace_with_bnb_linear", + "set_module_8bit_tensor_to_device", + "set_module_quantized_tensor_to_device", + ], + "deepspeed": [ + "HfDeepSpeedConfig", + "HfTrainerDeepSpeedConfig", + "deepspeed_config", + "deepspeed_init", + "deepspeed_load_checkpoint", + "deepspeed_optim_sched", + "is_deepspeed_available", + "is_deepspeed_zero3_enabled", + "set_hf_deepspeed_config", + "unset_hf_deepspeed_config", + ], + "integration_utils": [ + "INTEGRATION_TO_CALLBACK", + "AzureMLCallback", + "ClearMLCallback", + "CodeCarbonCallback", + "CometCallback", + "DagsHubCallback", + "DVCLiveCallback", + "FlyteCallback", + "MLflowCallback", + "NeptuneCallback", + "NeptuneMissingConfiguration", + "TensorBoardCallback", + "WandbCallback", + "get_available_reporting_integrations", + "get_reporting_integration_callbacks", + "hp_params", + "is_azureml_available", + "is_clearml_available", + "is_codecarbon_available", + "is_comet_available", + "is_dagshub_available", + "is_dvclive_available", + "is_flyte_deck_standard_available", + "is_flytekit_available", + "is_mlflow_available", + "is_neptune_available", + "is_optuna_available", + "is_ray_available", + "is_ray_tune_available", + "is_sigopt_available", + "is_tensorboard_available", + "is_wandb_available", + "rewrite_logs", + "run_hp_search_optuna", + "run_hp_search_ray", + "run_hp_search_sigopt", + "run_hp_search_wandb", + ], + "peft": ["PeftAdapterMixin"], +} + +if TYPE_CHECKING: + from .awq import fuse_awq_modules, replace_with_awq_linear + from .bitsandbytes import ( + get_keys_to_not_convert, + replace_8bit_linear, + replace_with_bnb_linear, + set_module_8bit_tensor_to_device, + set_module_quantized_tensor_to_device, + ) + from .deepspeed import ( + HfDeepSpeedConfig, + HfTrainerDeepSpeedConfig, + deepspeed_config, + deepspeed_init, + deepspeed_load_checkpoint, + deepspeed_optim_sched, + is_deepspeed_available, + is_deepspeed_zero3_enabled, + set_hf_deepspeed_config, + unset_hf_deepspeed_config, + ) + from .integration_utils import ( + INTEGRATION_TO_CALLBACK, + AzureMLCallback, + ClearMLCallback, + CodeCarbonCallback, + CometCallback, + DagsHubCallback, + DVCLiveCallback, + FlyteCallback, + MLflowCallback, + NeptuneCallback, + NeptuneMissingConfiguration, + TensorBoardCallback, + WandbCallback, + get_available_reporting_integrations, + get_reporting_integration_callbacks, + hp_params, + is_azureml_available, + is_clearml_available, + is_codecarbon_available, + is_comet_available, + is_dagshub_available, + is_dvclive_available, + is_flyte_deck_standard_available, + is_flytekit_available, + is_mlflow_available, + is_neptune_available, + is_optuna_available, + is_ray_available, + is_ray_tune_available, + is_sigopt_available, + is_tensorboard_available, + is_wandb_available, + rewrite_logs, + run_hp_search_optuna, + run_hp_search_ray, + run_hp_search_sigopt, + run_hp_search_wandb, + ) + from .peft import PeftAdapterMixin +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/modified/integrations/__pycache__/__init__.cpython-39.pyc b/modified/integrations/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6103127d0794c5a4a1717b3bef6a592eea53bdc Binary files /dev/null and 
b/modified/integrations/__pycache__/__init__.cpython-39.pyc differ diff --git a/modified/integrations/__pycache__/awq.cpython-39.pyc b/modified/integrations/__pycache__/awq.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ab29c84fa788c1627c0b79be1fe61866a383d44 Binary files /dev/null and b/modified/integrations/__pycache__/awq.cpython-39.pyc differ diff --git a/modified/integrations/__pycache__/bitsandbytes.cpython-39.pyc b/modified/integrations/__pycache__/bitsandbytes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03b5a3132fe81b7e4cdd886657e7064b22200265 Binary files /dev/null and b/modified/integrations/__pycache__/bitsandbytes.cpython-39.pyc differ diff --git a/modified/integrations/__pycache__/deepspeed.cpython-39.pyc b/modified/integrations/__pycache__/deepspeed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7faa8c499a85e212576725a7e0a27fb3c32d0e75 Binary files /dev/null and b/modified/integrations/__pycache__/deepspeed.cpython-39.pyc differ diff --git a/modified/integrations/__pycache__/integration_utils.cpython-39.pyc b/modified/integrations/__pycache__/integration_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20435511324979c2d92448823f51114b48615337 Binary files /dev/null and b/modified/integrations/__pycache__/integration_utils.cpython-39.pyc differ diff --git a/modified/integrations/__pycache__/peft.cpython-39.pyc b/modified/integrations/__pycache__/peft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..326b88bfe9d59d1015cff1f5f413486e7966c859 Binary files /dev/null and b/modified/integrations/__pycache__/peft.cpython-39.pyc differ diff --git a/modified/integrations/awq.py b/modified/integrations/awq.py new file mode 100644 index 0000000000000000000000000000000000000000..336a216e4014614953a7cbd6d898b123205edfb0 --- /dev/null +++ b/modified/integrations/awq.py @@ -0,0 +1,339 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
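+# A hedged usage sketch (illustrative only; the example values are assumptions, and in practice
+# these helpers are driven by `PreTrainedModel.from_pretrained(..., quantization_config=...)`):
+#
+#     from modified.utils.quantization_config import AwqConfig
+#
+#     awq_config = AwqConfig(bits=4, group_size=128)  # assumed example values
+#     model, was_replaced = replace_with_awq_linear(
+#         model, modules_to_not_convert=["lm_head"], quantization_config=awq_config
+#     )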
+"AWQ (Activation aware Weight Quantization) integration file" +from ..activations import ACT2FN +from ..modeling_utils import PreTrainedModel +from ..utils import is_auto_awq_available, is_torch_available +from ..utils.quantization_config import AwqBackendPackingMethod, AwqConfig, AWQLinearVersion + + +if is_torch_available(): + import torch + import torch.nn as nn + + +AWQ_FUSED_MAPPINGS = { + "mistral": { + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + }, + "llama": { + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + }, +} + + +def replace_with_awq_linear( + model, + modules_to_not_convert=None, + quantization_config=None, + current_key_name=None, + has_been_replaced=False, +) -> bool: + """ + Public method that recursively replaces the Linear layers of the given model with AWQ quantized layers. + `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the + conversion has been successfull or not. + + During the module replacement, we also infer the backend to use through the `quantization_config` object. + + Args: + model (`torch.nn.Module`): + The model to convert, can be any `torch.nn.Module` instance. + quantization_config (`AwqConfig`): + The quantization config object that contains the quantization parameters. + modules_to_not_convert (`list`, *optional*): + A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be + converted. + current_key_name (`list`, *optional*): + A list that contains the current key name. This is used for recursion and should not be passed by the user. + has_been_replaced (`bool`, *optional*): + A boolean that indicates if the conversion has been successful or not. This is used for recursion and + should not be passed by the user. + """ + if modules_to_not_convert is None: + modules_to_not_convert = [] + + backend = quantization_config.backend + + if not is_auto_awq_available(): + raise ValueError( + "AWQ (either `autoawq` or `llmawq`) is not available. 
Please install it with `pip install autoawq` or check out the installation guide in https://github.com/mit-han-lab/llm-awq" + ) + + if backend == AwqBackendPackingMethod.AUTOAWQ: + from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV + elif backend == AwqBackendPackingMethod.LLMAWQ: + from awq.quantize.qmodule import WQLinear + + if backend == AwqBackendPackingMethod.AUTOAWQ: + target_cls = WQLinear_GEMM if quantization_config.version == AWQLinearVersion.GEMM else WQLinear_GEMV + else: + target_cls = WQLinear + + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + + if isinstance(module, nn.Linear) and name not in modules_to_not_convert: + # Check if the current key is not in the `modules_to_not_convert` + if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): + in_features = module.in_features + out_features = module.out_features + + model._modules[name] = target_cls( + w_bit=quantization_config.bits, + group_size=quantization_config.group_size, + in_features=in_features, + out_features=out_features, + bias=module.bias is not None, + dev=module.weight.device, + ) + has_been_replaced = True + + # Force requires grad to False to avoid unexpected errors + model._modules[name].requires_grad_(False) + if len(list(module.children())) > 0: + _, has_been_replaced = replace_with_awq_linear( + module, + modules_to_not_convert=modules_to_not_convert, + current_key_name=current_key_name, + quantization_config=quantization_config, + has_been_replaced=has_been_replaced, + ) + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced + + +def get_modules_to_fuse(model, quantization_config): + """ + Returns the fusing mapping given the quantization config and the model + + Args: + model (`~PreTrainedModel`): + The model to fuse - note this model should have been converted into AWQ format beforehand. + quantization_config (`~transformers.quantization_config.AWQConfig`): + The quantization configuration to use. + """ + if not isinstance(model, PreTrainedModel): + raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}") + + # Always default to `quantization_config.modules_to_fuse` + if quantization_config.modules_to_fuse is not None: + current_fused_mapping = quantization_config.modules_to_fuse + current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len + elif model.config.model_type in AWQ_FUSED_MAPPINGS: + current_fused_mapping = AWQ_FUSED_MAPPINGS[model.config.model_type] + + # Handle hidden_size, num_attention_heads, num_key_value_heads on our own. + hidden_size = model.config.hidden_size + num_attention_heads = model.config.num_attention_heads + num_key_value_heads = getattr(model.config, "num_key_value_heads", num_attention_heads) + + # Fill `current_fused_mapping` with the expected values + current_fused_mapping["hidden_size"] = hidden_size + current_fused_mapping["num_attention_heads"] = num_attention_heads + current_fused_mapping["num_key_value_heads"] = num_key_value_heads + current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len + else: + raise ValueError( + "Fusing mapping not found either on the quantization config or the supported `AWQ_FUSED_MAPPINGS`. Please pass a `fused_mapping` argument" + " in the `quantization_config` or raise an issue on transformers https://github.com/huggingface/transformers to add its support." 
+        )
+    return current_fused_mapping
+
+
+def fuse_awq_modules(model, quantization_config):
+    """
+    Optionally fuse some modules in the model to speed up inference.
+
+    Args:
+        model (`~PreTrainedModel`):
+            The model to fuse - note this model should have been converted into AWQ format beforehand.
+        quantization_config (`dict`):
+            The quantization configuration to use.
+    """
+    # We need to convert it from dict in order to get an AwqConfig object
+    # otherwise the fields `backend` etc. will not be available
+    # https://github.com/huggingface/transformers/pull/27411#discussion_r1414044495
+    awq_config = AwqConfig.from_dict(quantization_config)
+    backend = awq_config.backend
+
+    modules_to_fuse = get_modules_to_fuse(model, awq_config)
+
+    if backend == AwqBackendPackingMethod.AUTOAWQ:
+        from awq.modules.fused.attn import QuantAttentionFused
+        from awq.modules.fused.mlp import QuantFusedMLP
+        from awq.modules.fused.norm import FasterTransformerRMSNorm
+    else:
+        raise ValueError("Fusing is only supported for the AutoAWQ backend")
+
+    for name, module in model.named_modules():
+        # Replace layer norms
+        _fuse_awq_layernorm(modules_to_fuse["layernorm"], module, FasterTransformerRMSNorm)
+
+        # Replace MLP layers
+        _fuse_awq_mlp(model, name, modules_to_fuse["mlp"], module, QuantFusedMLP)
+
+        # Replace attention layers
+        _fuse_awq_attention_layers(model, module, modules_to_fuse, name, QuantAttentionFused)
+    return model
+
+
+def _fuse_awq_layernorm(fuse_module_names, module, target_cls):
+    """
+    Fuse the LayerNorm layers into a target class using autoawq
+
+    Args:
+        fuse_module_names (`List[str]`):
+            The list of module names to fuse
+        module (`nn.Module`):
+            The pytorch parent module that has layernorm modules to fuse
+        target_cls (`~autoawq.FasterTransformerRMSNorm`):
+            The `FasterTransformerRMSNorm` class, as it is the only class supported for now.
+    """
+    for module_name in fuse_module_names:
+        if hasattr(module, module_name):
+            old_module = getattr(module, module_name)
+            module._modules[module_name] = target_cls(
+                old_module.weight,
+                old_module.variance_epsilon,
+            ).to(old_module.weight.device)
+            del old_module
+
+
+def _fuse_awq_mlp(model, current_module_name, fuse_module_names, module, target_cls):
+    """
+    Fuse the MLP layers into a target class using autoawq
+
+    Args:
+        model (`~PreTrainedModel`):
+            The input pretrained model
+        current_module_name (`str`):
+            The current submodule name
+        fuse_module_names (`List[str]`):
+            The list of module names to fuse. For the MLP layers it has to be an array
+            of length 3 that consists of the 3 MLP layers in the order (gate (dense layer post-attention) / up / down layers)
+        module (`nn.Module`):
+            The pytorch parent module that has layernorm modules to fuse
+        target_cls (`~autoawq.QuantFusedMLP`):
+            The `QuantFusedMLP` class, as it is the only class supported for now.
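+
+    (Illustrative example, added for clarity: for a llama-style block this is called with
+    fuse_module_names == ["gate_proj", "up_proj", "down_proj"], per AWQ_FUSED_MAPPINGS above,
+    and the three projections are folded into a single QuantFusedMLP submodule.)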
+ """ + if len(fuse_module_names) == 0: + return + + if hasattr(module, fuse_module_names[0]): + gate_proj = getattr(module, fuse_module_names[0]) + up_proj = getattr(module, fuse_module_names[1]) + down_proj = getattr(module, fuse_module_names[2]) + + previous_device = gate_proj.qweight.device + activation_fn = ACT2FN[model.config.hidden_act] + new_module = target_cls(gate_proj, down_proj, up_proj, activation_fn) + + parent_name, child_name = current_module_name.rsplit(".", 1) + parent = model.get_submodule(parent_name) + setattr(parent, child_name, new_module.to(previous_device)) + + del gate_proj, up_proj, down_proj + + +def _fuse_awq_attention_layers(model, module, modules_to_fuse, current_module_name, target_cls): + """ + Fuse the Attention layers into a target class using autoawq + + Args: + model (`~PreTrainedModel`): + The input pretrained model + module (`nn.Module`): + The pytorch parent module that has layernorm modules to fuse + modules_to_fuse (`List[str]`): + The module fusing mapping. The dictionary has to contain a field `attention` with attention module names + in the correct order: q, k, v, o layer + current_module_name (`str`): + The current submodule name + target_cls (`~autoawq.QuantAttentionFused`): + The `QuantAttentionFused` class as it only supports that class + for now. + """ + from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV + + if len(modules_to_fuse["attention"]) == 0: + return + + if hasattr(module, modules_to_fuse["attention"][0]): + # First, we pack the QKV layers together + q_proj = getattr(module, modules_to_fuse["attention"][0]) + previous_device = q_proj.qweight.device + + if isinstance(q_proj, WQLinear_GEMV): + linear_target_cls = WQLinear_GEMV + cat_dim = 0 + elif isinstance(q_proj, WQLinear_GEMM): + linear_target_cls = WQLinear_GEMM + cat_dim = 1 + else: + raise ValueError("Unsupported q_proj type: {type(q_proj)}") + + k_proj = getattr(module, modules_to_fuse["attention"][1]) + v_proj = getattr(module, modules_to_fuse["attention"][2]) + o_proj = getattr(module, modules_to_fuse["attention"][3]) + + bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0) if q_proj.bias is not None else None + + qkv_layer = linear_target_cls( + q_proj.w_bit, + q_proj.group_size, + q_proj.in_features, + q_proj.out_features + k_proj.out_features + v_proj.out_features, + q_proj.bias is not None, + next(iter(module.state_dict().values())).device, + ) + + qkv_layer.qweight = torch.cat([q_proj.qweight, k_proj.qweight, v_proj.qweight], dim=cat_dim) + qkv_layer.qzeros = torch.cat([q_proj.qzeros, k_proj.qzeros, v_proj.qzeros], dim=cat_dim) + qkv_layer.scales = torch.cat([q_proj.scales, k_proj.scales, v_proj.scales], dim=cat_dim) + + if isinstance(qkv_layer, WQLinear_GEMV): + qkv_layer.split_k_iters = q_proj.split_k_iters + + qkv_layer.bias = bias + + fused_attention_layer = target_cls( + modules_to_fuse["hidden_size"], + modules_to_fuse["num_attention_heads"], + modules_to_fuse["num_key_value_heads"], + qkv_layer, + o_proj, + previous_device, + modules_to_fuse["max_seq_len"], + use_alibi=modules_to_fuse["use_alibi"], + ) + + fused_attention_layer.is_hf_transformers = True + + parent_name, child_name = current_module_name.rsplit(".", 1) + parent = model.get_submodule(parent_name) + setattr(parent, child_name, fused_attention_layer.to(previous_device)) + + del q_proj, k_proj, v_proj, o_proj diff --git a/modified/integrations/bitsandbytes.py b/modified/integrations/bitsandbytes.py new file mode 100644 index 
index 0000000000000000000000000000000000000000..1a8220b1ed7b034d9a1e2c6486482cf13c6af1fe
--- /dev/null
+++ b/modified/integrations/bitsandbytes.py
@@ -0,0 +1,290 @@
+import importlib.metadata
+import warnings
+from copy import deepcopy
+
+from packaging import version
+
+from ..utils import is_accelerate_available, is_bitsandbytes_available, logging
+
+
+if is_bitsandbytes_available():
+    import bitsandbytes as bnb
+    import torch
+    import torch.nn as nn
+
+    from ..pytorch_utils import Conv1D
+
+if is_accelerate_available():
+    from accelerate import init_empty_weights
+    from accelerate.utils import find_tied_parameters
+
+logger = logging.get_logger(__name__)
+
+
+def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
+    """
+    A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
+    `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The
+    function is adapted from accelerate's `set_module_tensor_to_device`, extended to support the `Int8Params` class
+    from `bitsandbytes`.
+
+    Args:
+        module (`torch.nn.Module`):
+            The module in which the tensor we want to move lives.
+        tensor_name (`str`):
+            The full name of the parameter/buffer.
+        device (`int`, `str` or `torch.device`):
+            The device on which to set the tensor.
+        value (`torch.Tensor`, *optional*):
+            The value of the tensor (useful when going from the meta device to any other device).
+        fp16_statistics (`torch.HalfTensor`, *optional*):
+            The list of fp16 statistics to set on the module, used for serialization.
+    """
+    # Recurse if needed
+    if "." in tensor_name:
+        splits = tensor_name.split(".")
+        for split in splits[:-1]:
+            new_module = getattr(module, split)
+            if new_module is None:
+                raise ValueError(f"{module} has no attribute {split}.")
+            module = new_module
+        tensor_name = splits[-1]
+
+    if tensor_name not in module._parameters and tensor_name not in module._buffers:
+        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+    is_buffer = tensor_name in module._buffers
+    old_value = getattr(module, tensor_name)
+
+    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
+        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {device}.")
+
+    is_4bit = False
+    is_8bit = False
+    if is_buffer or not is_bitsandbytes_available():
+        is_8bit = False
+        is_4bit = False
+    else:
+        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
+        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
+
+    if is_8bit or is_4bit:
+        param = module._parameters[tensor_name]
+        if param.device.type != "cuda":
+            if value is None:
+                new_value = old_value.to(device)
+            elif isinstance(value, torch.Tensor):
+                new_value = value.to("cpu")
+                if value.dtype == torch.int8:
+                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
+                        "0.37.2"
+                    )
+                    if not is_8bit_serializable:
+                        raise ValueError(
+                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
+                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
+                        )
+            else:
+                new_value = torch.tensor(value, device="cpu")
+
+            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
+            # Since weights are saved in the correct "orientation", we skip transposing when loading.
+            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
+                new_value = new_value.T
+
+            kwargs = old_value.__dict__
+            if is_8bit:
+                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
+            elif is_4bit:
+                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
+
+            module._parameters[tensor_name] = new_value
+            if fp16_statistics is not None:
+                setattr(module.weight, "SCB", fp16_statistics.to(device))
+
+    else:
+        if value is None:
+            new_value = old_value.to(device)
+        elif isinstance(value, torch.Tensor):
+            new_value = value.to(device)
+        else:
+            new_value = torch.tensor(value, device=device)
+
+        if is_buffer:
+            module._buffers[tensor_name] = new_value
+        else:
+            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
+            module._parameters[tensor_name] = new_value
+
+
+def _replace_with_bnb_linear(
+    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
+):
+    """
+    Private method that wraps the recursion for module replacement.
+
+    Returns the converted model and a boolean that indicates if the conversion has been successful or not.
+    """
+    for name, module in model.named_children():
+        if current_key_name is None:
+            current_key_name = []
+        current_key_name.append(name)
+
+        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
+            # Check if the current key is not in the `modules_to_not_convert`
+            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
+                with init_empty_weights():
+                    if isinstance(module, Conv1D):
+                        in_features, out_features = module.weight.shape
+                    else:
+                        in_features = module.in_features
+                        out_features = module.out_features
+
+                    if quantization_config.quantization_method() == "llm_int8":
+                        model._modules[name] = bnb.nn.Linear8bitLt(
+                            in_features,
+                            out_features,
+                            module.bias is not None,
+                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
+                            threshold=quantization_config.llm_int8_threshold,
+                        )
+                        has_been_replaced = True
+                    else:
+                        if (
+                            quantization_config.llm_int8_skip_modules is not None
+                            and name in quantization_config.llm_int8_skip_modules
+                        ):
+                            pass
+                        else:
+                            model._modules[name] = bnb.nn.Linear4bit(
+                                in_features,
+                                out_features,
+                                module.bias is not None,
+                                quantization_config.bnb_4bit_compute_dtype,
+                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
+                                quant_type=quantization_config.bnb_4bit_quant_type,
+                            )
+                            has_been_replaced = True
+                    # Store the module class in case we need to transpose the weight later
+                    model._modules[name].source_cls = type(module)
+                    # Force requires grad to False to avoid unexpected errors
+                    model._modules[name].requires_grad_(False)
+        if len(list(module.children())) > 0:
+            _, has_been_replaced = _replace_with_bnb_linear(
+                module,
+                modules_to_not_convert,
+                current_key_name,
+                quantization_config,
+                has_been_replaced=has_been_replaced,
+            )
+        # Remove the last key for recursion
+        current_key_name.pop(-1)
+    return model, has_been_replaced
+
+
+def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
+    """
+    A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules from the `bitsandbytes`
+    library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8():
+    8-bit Matrix Multiplication for Transformers at Scale`. Make sure a `bitsandbytes` build compiled for your
+    hardware's CUDA version is installed before running this function: `pip install -i https://test.pypi.org/simple/
+    bitsandbytes`
+
+    The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should
+    be kept as a `torch.nn.Linear` module. The replacement is done under the `init_empty_weights` context manager, so
+    no CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a
+    matrix multiplication into two streams: (1) a systematic feature outlier stream matrix multiplied in fp16
+    (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no
+    predictive degradation is possible for very large models (>=176B parameters).
+
+    Parameters:
+        model (`torch.nn.Module`):
+            Input model or `torch.nn.Module` as the function is run recursively.
+        modules_to_not_convert (`List[str]`, *optional*, defaults to `["lm_head"]`):
+            Names of the modules to not convert in `Linear8bitLt`. In practice we keep the `lm_head` in full precision
+            for numerical stability reasons.
+        current_key_name (`List[str]`, *optional*):
+            An array to track the current key of the recursion. This is used to check whether the current key (part of
+            it) is not in the list of modules to not convert (for instance, modules that are offloaded to `cpu` or
+            `disk`).
+    """
+    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
+    model, has_been_replaced = _replace_with_bnb_linear(
+        model, modules_to_not_convert, current_key_name, quantization_config
+    )
+
+    if not has_been_replaced:
+        logger.warning(
+            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
+            " Please double check your model architecture, or submit an issue on github if you think this is"
+            " a bug."
+        )
+
+    return model
+
+
+# For backward compatibility
+def replace_8bit_linear(*args, **kwargs):
+    warnings.warn(
+        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
+        FutureWarning,
+    )
+    return replace_with_bnb_linear(*args, **kwargs)
+
+
+# For backward compatibility
+def set_module_8bit_tensor_to_device(*args, **kwargs):
+    warnings.warn(
+        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
+        FutureWarning,
+    )
+    return set_module_quantized_tensor_to_device(*args, **kwargs)
+
+
+def get_keys_to_not_convert(model):
+    r"""
+    A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM
+    modules we may want to keep the lm_head in full precision for numerical stability reasons. For other
+    architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the
+    modules to not convert in int8.
+
+    Parameters:
+        model (`torch.nn.Module`):
+            Input model
+    """
+    # Create a copy of the model and tie the weights, then
+    # check if it contains tied weights
+    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
+    tied_model.tie_weights()
+
+    tied_params = find_tied_parameters(tied_model)
+    # For compatibility with Accelerate < 0.18
+    if isinstance(tied_params, dict):
+        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
+    else:
+        tied_keys = sum(tied_params, [])
+    has_tied_params = len(tied_keys) > 0
+
+    # If there are no tied weights, we want to keep the lm_head (output embedding) in full precision
+    if not has_tied_params:
+        output_emb = model.get_output_embeddings()
+        if output_emb is not None:
+            list_last_module = [name for name, module in model.named_modules() if id(module) == id(output_emb)]
+            return list_last_module
+
+    # otherwise, keep the last module in full precision, together with any tied weights
+    list_modules = list(model.named_parameters())
+    list_last_module = [list_modules[-1][0]]
+    # add the last module together with the tied weights (the set difference keeps the last module
+    # only if it is not itself among the tied keys)
+    intersection = set(list_last_module) - set(tied_keys)
+    list_untouched = list(set(tied_keys)) + list(intersection)
+
+    # remove ".weight" and ".bias" from the keys
+    names_to_remove = [".weight", ".bias"]
+    filtered_module_names = []
+    for name in list_untouched:
+        for name_to_remove in names_to_remove:
+            if name_to_remove in name:
+                name = name.replace(name_to_remove, "")
+        filtered_module_names.append(name)
+
+    return filtered_module_names
diff --git a/modified/integrations/deepspeed.py b/modified/integrations/deepspeed.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb9c022b0f28f4fcfb334dde50b25f8fda7e95c7
--- /dev/null
+++ b/modified/integrations/deepspeed.py
@@ -0,0 +1,408 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Integration with DeepSpeed
+"""
+
+import importlib.metadata as importlib_metadata
+import importlib.util
+import weakref
+from functools import partialmethod
+
+from ..dependency_versions_check import dep_version_check
+from ..utils import is_accelerate_available, is_torch_available, logging
+
+
+if is_torch_available():
+    import torch
+
+    from ..optimization import get_scheduler
+
+logger = logging.get_logger(__name__)
+
+
+def is_deepspeed_available():
+    package_exists = importlib.util.find_spec("deepspeed") is not None
+
+    # Check that we're not importing a stray "deepspeed" directory somewhere, but the actual
+    # library, by trying to grab its package metadata.
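+    # (Illustrative note: a stray local `deepspeed/` directory can make `find_spec` succeed while
+    # `importlib_metadata.metadata("deepspeed")` still raises PackageNotFoundError; that is the
+    # situation this metadata probe guards against.)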
+    if package_exists:
+        try:
+            _ = importlib_metadata.metadata("deepspeed")
+            return True
+        except importlib_metadata.PackageNotFoundError:
+            return False
+    return False
+
+
+if is_accelerate_available() and is_deepspeed_available():
+    from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig
+else:
+    # Inherits from a dummy `object` if accelerate is not available, so that python succeeds to import this file.
+    # Deepspeed glue code will never inherit this dummy object as it checks if accelerate is available.
+    from builtins import object as DeepSpeedConfig
+
+
+class HfDeepSpeedConfig(DeepSpeedConfig):
+    """
+    This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
+
+    A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
+    things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
+    it's important that this object remains alive while the program is still running.
+
+    [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
+    with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
+    the DeepSpeed configuration is not modified in any way.
+
+    Args:
+        config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
+
+    """
+
+    def __init__(self, config_file_or_dict):
+        # set global weakref object
+        set_hf_deepspeed_config(self)
+        dep_version_check("accelerate")
+        dep_version_check("deepspeed")
+        super().__init__(config_file_or_dict)
+
+
+class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
+    """
+    The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the
+    same lifespan as the latter.
+    """
+
+    def __init__(self, config_file_or_dict):
+        super().__init__(config_file_or_dict)
+        self._dtype = None
+        self.mismatches = []
+
+    def dtype(self):
+        if self._dtype is None:
+            raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
+        return self._dtype
+
+    def is_auto(self, ds_key_long):
+        val = self.get_value(ds_key_long)
+        if val is None:
+            return False
+        else:
+            return val == "auto"
+
+    def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
+        """
+        A utility method that massages the config file and can optionally verify that the values match.
+
+        1. Replace "auto" values with the `TrainingArguments` value.
+
+        2. If it wasn't "auto" and `must_match` is true, then check that the DS config matches the Trainer
+        config value; on a mismatch, add the entry to `self.mismatches`, which will raise a ValueError during
+        `trainer_config_finalize` if one or more mismatches were recorded.
+
+        """
+        config, ds_key = self.find_config_node(ds_key_long)
+        if config is None:
+            return
+
+        if config.get(ds_key) == "auto":
+            config[ds_key] = hf_val
+            return
+
+        if not must_match:
+            return
+
+        ds_val = config.get(ds_key)
+        if ds_val is not None and ds_val != hf_val:
+            self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")
+
+    fill_only = partialmethod(fill_match, must_match=False)
+
+    def trainer_config_process(self, args):
+        """
+        Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
+        creation.
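+
+        (Illustrative example, added for clarity: with `"train_micro_batch_size_per_gpu": "auto"` in
+        the DS config and `per_device_train_batch_size=8`, `fill_match` rewrites the entry to `8`;
+        a hard-coded value of, say, `4` would instead be recorded in `self.mismatches` and surface
+        as a ValueError in `trainer_config_finalize`.)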
+ """ + # DeepSpeed does: + # train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps + train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps + self.fill_match( + "train_micro_batch_size_per_gpu", args.per_device_train_batch_size, "per_device_train_batch_size" + ) + self.fill_match("gradient_accumulation_steps", args.gradient_accumulation_steps, "gradient_accumulation_steps") + self.fill_match("train_batch_size", train_batch_size, "train_batch_size (calculated)") + self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm") + + self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate") + self.fill_match("optimizer.params.betas", [args.adam_beta1, args.adam_beta2], "adam_beta1+adam_beta2") + self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon") + self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay") + + self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg + self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate") + # total_num_steps - will get set in trainer_config_finalize + + # fp16 + if args.fp16 or args.fp16_full_eval: + fp16_backend = "apex" if args.fp16_backend == "apex" else "amp" + else: + fp16_backend = None + + if args.save_on_each_node: + # deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True + self.config["checkpoint"] = self.config.get("checkpoint", {}) + self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node + + # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set + # any here unless the user did the work + self.fill_match( + "fp16.enabled", + ((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"), + "fp16|fp16_full_eval+fp16_backend(amp)", + ) + + # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any + # ZeRO features + self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)") + self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level") + + self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval") + + # deepspeed's default mode is fp16 unless there is a config that says differently + if self.is_true("bf16.enabled"): + self._dtype = torch.bfloat16 + elif self.is_false("fp16.enabled"): + self._dtype = torch.float32 + else: + self._dtype = torch.float16 + + def trainer_config_finalize(self, args, model, num_training_steps): + """ + This stage is run after we have the model and know num_training_steps. + + Now we can complete the configuration process. 
+ """ + # zero + + # deal with config keys that use `auto` value and rely on model's hidden_size + hidden_size_based_keys = [ + "zero_optimization.reduce_bucket_size", + "zero_optimization.stage3_prefetch_bucket_size", + "zero_optimization.stage3_param_persistence_threshold", + ] + hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)] + + if len(hidden_size_auto_keys) > 0: + if hasattr(model.config, "hidden_size"): + hidden_size = model.config.hidden_size + elif hasattr(model.config, "hidden_sizes"): + # if there are many hidden sizes pick the largest one + hidden_size = max(model.config.hidden_sizes) + else: + raise ValueError( + "The model's config file has neither `hidden_size` nor `hidden_sizes` entry, " + "therefore it's not possible to automatically fill out the following `auto` entries " + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " + "`auto` values for these keys with an integer value of your choice." + ) + + self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size) + if self.is_zero3(): + # automatically assign the optimal config values based on model config + self.fill_only("zero_optimization.stage3_prefetch_bucket_size", 0.9 * hidden_size * hidden_size) + self.fill_only("zero_optimization.stage3_param_persistence_threshold", 10 * hidden_size) + + # scheduler + self.fill_match("scheduler.params.total_num_steps", num_training_steps, "num_training_steps (calculated)") + self.fill_match("scheduler.params.warmup_num_steps", args.get_warmup_steps(num_training_steps), "warmup_steps") + + if len(self.mismatches) > 0: + mismatches = "\n".join(self.mismatches) + raise ValueError( + "Please correct the following DeepSpeed config values that mismatch TrainingArguments" + f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'." + ) + + +# keep the config object global to be able to access it anywhere during TrainingArguments life-cycle +_hf_deepspeed_config_weak_ref = None + + +def set_hf_deepspeed_config(hf_deepspeed_config_obj): + # this is a special weakref global object to allow us to get to Deepspeed config from APIs + # that don't have an easy way to get to the Deepspeed config outside of the Trainer domain. + global _hf_deepspeed_config_weak_ref + # will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed) + _hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj) + + +def unset_hf_deepspeed_config(): + # useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method + global _hf_deepspeed_config_weak_ref + _hf_deepspeed_config_weak_ref = None + + +def is_deepspeed_zero3_enabled(): + if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: + return _hf_deepspeed_config_weak_ref().is_zero3() + else: + return False + + +def deepspeed_config(): + if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: + return _hf_deepspeed_config_weak_ref().config + else: + return None + + +def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters): + """ + A convenience wrapper that deals with optimizer and lr scheduler configuration. + """ + from accelerate.utils import DummyOptim, DummyScheduler + + config = hf_deepspeed_config.config + + # Optimizer + Scheduler + # Currently supported combos: + # 1. DS scheduler + DS optimizer: Yes + # 2. 
HF scheduler + HF optimizer: Yes + # 3. DS scheduler + HF optimizer: Yes + # 4. HF scheduler + DS optimizer: No + # + # Unless Offload is enabled in which case it's: + # 1. DS scheduler + DS optimizer: Yes + # 2. HF scheduler + HF optimizer: Mostly* + # 3. DS scheduler + HF optimizer: Mostly* + # 4. HF scheduler + DS optimizer: Yes + # + # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB) + + optimizer = None + if "optimizer" in config: + if args.adafactor: + raise ValueError( + "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. " + "Only one optimizer can be configured." + ) + optimizer = DummyOptim(params=model_parameters) + else: + if hf_deepspeed_config.is_offload(): + logger.info( + "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the" + " custom optimizer has both CPU and GPU implementation (except LAMB)" + ) + + # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch. + # But trainer uses AdamW by default. + optimizer = trainer.create_optimizer() + # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer` + config["zero_allow_untested_optimizer"] = True + + lr_scheduler = None + if "scheduler" in config: + lr_scheduler = DummyScheduler(optimizer) + else: + if isinstance(optimizer, DummyOptim): + + def _lr_scheduler_callable(optimizer): + return get_scheduler( + trainer.args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=trainer.args.get_warmup_steps(num_training_steps), + num_training_steps=num_training_steps, + ) + + lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable) + else: + lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) + + return optimizer, lr_scheduler + + +def deepspeed_init(trainer, num_training_steps, inference=False): + """ + Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args. + + If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made. 
+
+    Args:
+        trainer: Trainer object
+        num_training_steps: number of training steps, per single GPU
+        inference: launch in inference mode (no optimizer and no lr scheduler)
+
+    Returns: optimizer, lr_scheduler
+
+    We may use `deepspeed_init` more than once during the life of Trainer; when we do, it relies on a temp hack based
+    on: https://github.com/microsoft/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it
+    can't resume from a checkpoint after it did some stepping https://github.com/microsoft/DeepSpeed/issues/1612
+
+    """
+    from deepspeed.utils import logger as ds_logger
+
+    model = trainer.model
+    args = trainer.args
+
+    hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config
+
+    # resume config update - some bits like `model` and `num_training_steps` only become available during train
+    hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
+
+    # set the Deepspeed log level consistent with the Trainer
+    ds_logger.setLevel(args.get_process_log_level())
+
+    if inference:
+        # only Z3 makes sense for the inference
+        if not hf_deepspeed_config.is_zero3():
+            raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")
+
+        # in case the training config is re-used for inference
+        hf_deepspeed_config.del_config_sub_tree("optimizer")
+        hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
+        optimizer, lr_scheduler = None, None
+        model_parameters = None
+    else:
+        trainer.optimizer = None  # important for when deepspeed_init is used as re-init
+        model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
+        optimizer, lr_scheduler = deepspeed_optim_sched(
+            trainer, hf_deepspeed_config, args, num_training_steps, model_parameters
+        )
+
+    # keep for quick debug:
+    # from pprint import pprint; pprint(config)
+
+    return optimizer, lr_scheduler
+
+
+def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path):
+    # it's possible that the user is trying to resume from model_path, which doesn't necessarily
+    # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
+    # a resume from a checkpoint and not just a local pretrained weight. So we check here if the
+    # path contains what looks like a deepspeed checkpoint
+    import glob
+
+    deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*"))
+
+    if len(deepspeed_checkpoint_dirs) > 0:
+        logger.info(f"Attempting to resume from {checkpoint_path}")
+        # this magically updates self.optimizer and self.lr_scheduler
+        load_path, _ = deepspeed_engine.load_checkpoint(
+            checkpoint_path, load_optimizer_states=True, load_lr_scheduler_states=True
+        )
+        if load_path is None:
+            raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}")
+    else:
+        raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")
diff --git a/modified/integrations/integration_utils.py b/modified/integrations/integration_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbcbe0bc551ed68279718e0610272eab6becbd12
--- /dev/null
+++ b/modified/integrations/integration_utils.py
@@ -0,0 +1,1735 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Integrations with other Python libraries. +""" +import functools +import importlib.metadata +import importlib.util +import json +import numbers +import os +import pickle +import shutil +import sys +import tempfile +from dataclasses import asdict +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Union + +import numpy as np + +from .. import __version__ as version +from ..utils import flatten_dict, is_datasets_available, is_pandas_available, is_torch_available, logging + + +logger = logging.get_logger(__name__) + +if is_torch_available(): + import torch + +# comet_ml requires to be imported before any ML frameworks +_has_comet = importlib.util.find_spec("comet_ml") is not None and os.getenv("COMET_MODE", "").upper() != "DISABLED" +if _has_comet: + try: + import comet_ml # noqa: F401 + + if hasattr(comet_ml, "config") and comet_ml.config.get_config("comet.api_key"): + _has_comet = True + else: + if os.getenv("COMET_MODE", "").upper() != "DISABLED": + logger.warning("comet_ml is installed but `COMET_API_KEY` is not set.") + _has_comet = False + except (ImportError, ValueError): + _has_comet = False + +_has_neptune = ( + importlib.util.find_spec("neptune") is not None or importlib.util.find_spec("neptune-client") is not None +) +if TYPE_CHECKING and _has_neptune: + try: + _neptune_version = importlib.metadata.version("neptune") + logger.info(f"Neptune version {_neptune_version} available.") + except importlib.metadata.PackageNotFoundError: + try: + _neptune_version = importlib.metadata.version("neptune-client") + logger.info(f"Neptune-client version {_neptune_version} available.") + except importlib.metadata.PackageNotFoundError: + _has_neptune = False + +from ..trainer_callback import ProgressCallback, TrainerCallback # noqa: E402 +from ..trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # noqa: E402 +from ..training_args import ParallelMode # noqa: E402 +from ..utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available # noqa: E402 + + +# Integration functions: +def is_wandb_available(): + # any value of WANDB_DISABLED disables wandb + if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES: + logger.warning( + "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the " + "--report_to flag to control the integrations used for logging result (for instance --report_to none)." 
+ ) + return False + return importlib.util.find_spec("wandb") is not None + + +def is_clearml_available(): + return importlib.util.find_spec("clearml") is not None + + +def is_comet_available(): + return _has_comet + + +def is_tensorboard_available(): + return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None + + +def is_optuna_available(): + return importlib.util.find_spec("optuna") is not None + + +def is_ray_available(): + return importlib.util.find_spec("ray") is not None + + +def is_ray_tune_available(): + if not is_ray_available(): + return False + return importlib.util.find_spec("ray.tune") is not None + + +def is_sigopt_available(): + return importlib.util.find_spec("sigopt") is not None + + +def is_azureml_available(): + if importlib.util.find_spec("azureml") is None: + return False + if importlib.util.find_spec("azureml.core") is None: + return False + return importlib.util.find_spec("azureml.core.run") is not None + + +def is_mlflow_available(): + if os.getenv("DISABLE_MLFLOW_INTEGRATION", "FALSE").upper() == "TRUE": + return False + return importlib.util.find_spec("mlflow") is not None + + +def is_dagshub_available(): + return None not in [importlib.util.find_spec("dagshub"), importlib.util.find_spec("mlflow")] + + +def is_neptune_available(): + return _has_neptune + + +def is_codecarbon_available(): + return importlib.util.find_spec("codecarbon") is not None + + +def is_flytekit_available(): + return importlib.util.find_spec("flytekit") is not None + + +def is_flyte_deck_standard_available(): + if not is_flytekit_available(): + return False + return importlib.util.find_spec("flytekitplugins.deck") is not None + + +def is_dvclive_available(): + return importlib.util.find_spec("dvclive") is not None + + +def hp_params(trial): + if is_optuna_available(): + import optuna + + if isinstance(trial, optuna.Trial): + return trial.params + if is_ray_tune_available(): + if isinstance(trial, dict): + return trial + + if is_sigopt_available(): + if isinstance(trial, dict): + return trial + + if is_wandb_available(): + if isinstance(trial, dict): + return trial + + raise RuntimeError(f"Unknown type for trial {trial.__class__}") + + +def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + import optuna + + if trainer.args.process_index == 0: + + def _objective(trial, checkpoint_dir=None): + checkpoint = None + if checkpoint_dir: + for subdir in os.listdir(checkpoint_dir): + if subdir.startswith(PREFIX_CHECKPOINT_DIR): + checkpoint = os.path.join(checkpoint_dir, subdir) + trainer.objective = None + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(trial) + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=checkpoint) + else: + trainer.train(resume_from_checkpoint=checkpoint, trial=trial) + # If there hasn't been any evaluation during the training loop. 
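+            # (e.g. `evaluation_strategy="no"`), fall back to a single `evaluate()` call so the
+            # trial still returns a scalar objective for Optuna to optimize.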
+ if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return trainer.objective + + timeout = kwargs.pop("timeout", None) + n_jobs = kwargs.pop("n_jobs", 1) + directions = direction if isinstance(direction, list) else None + direction = None if directions is not None else direction + study = optuna.create_study(direction=direction, directions=directions, **kwargs) + study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) + if not study._is_multi_objective(): + best_trial = study.best_trial + return BestRun(str(best_trial.number), best_trial.value, best_trial.params) + else: + best_trials = study.best_trials + return [BestRun(str(best.number), best.values, best.params) for best in best_trials] + else: + for i in range(n_trials): + trainer.objective = None + args_main_rank = list(pickle.dumps(trainer.args)) + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(args_main_rank, src=0) + args = pickle.loads(bytes(args_main_rank)) + for key, value in asdict(args).items(): + if key != "local_rank": + setattr(trainer.args, key, value) + trainer.train(resume_from_checkpoint=None) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return None + + +def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + import ray + import ray.train + + def _objective(trial: dict, local_trainer): + try: + from transformers.utils.notebook import NotebookProgressCallback + + if local_trainer.pop_callback(NotebookProgressCallback): + local_trainer.add_callback(ProgressCallback) + except ModuleNotFoundError: + pass + + local_trainer.objective = None + + checkpoint = ray.train.get_checkpoint() + if checkpoint: + # Upon trial resume, the local_trainer's objective gets reset to None. + # If `local_trainer.train` is a noop (training has already reached + # the target number of epochs/steps), then this would + # trigger an unnecessary extra checkpoint at the end of training. + # -> Set the objective to a dummy value upon resume as a workaround. + local_trainer.objective = "objective" + + with checkpoint.as_directory() as checkpoint_dir: + checkpoint_path = next(Path(checkpoint_dir).glob(f"{PREFIX_CHECKPOINT_DIR}*")).as_posix() + local_trainer.train(resume_from_checkpoint=checkpoint_path, trial=trial) + else: + local_trainer.train(trial=trial) + + # If there hasn't been any evaluation during the training loop. + if getattr(local_trainer, "objective", None) is None: + metrics = local_trainer.evaluate() + local_trainer.objective = local_trainer.compute_objective(metrics) + + metrics.update({"objective": local_trainer.objective, "done": True}) + + with tempfile.TemporaryDirectory() as temp_checkpoint_dir: + local_trainer._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) + checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) + ray.train.report(metrics, checkpoint=checkpoint) + + if not trainer._memory_tracker.skip_memory_metrics: + from ..trainer_utils import TrainerMemoryTracker + + logger.warning( + "Memory tracking for your Trainer is currently " + "enabled. Automatically disabling the memory tracker " + "since the memory tracker is not serializable." 
+        )
+        trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True)
+
+    # The model and TensorBoard writer do not pickle so we have to remove them (if they exist)
+    # while doing the ray hp search.
+    _tb_writer = trainer.pop_callback(TensorBoardCallback)
+    trainer.model = None
+
+    # Setup default `resources_per_trial`.
+    if "resources_per_trial" not in kwargs:
+        # Default to 1 CPU and 1 GPU (if applicable) per trial.
+        kwargs["resources_per_trial"] = {"cpu": 1}
+        if trainer.args.n_gpu > 0:
+            kwargs["resources_per_trial"]["gpu"] = 1
+        resource_msg = "1 CPU" + (" and 1 GPU" if trainer.args.n_gpu > 0 else "")
+        logger.info(
+            "No `resources_per_trial` arg was passed into "
+            "`hyperparameter_search`. Setting it to a default value "
+            f"of {resource_msg} for each trial."
+        )
+    # Make sure each trainer only uses GPUs that were allocated per trial.
+    gpus_per_trial = kwargs["resources_per_trial"].get("gpu", 0)
+    trainer.args._n_gpu = gpus_per_trial
+
+    # Setup default `progress_reporter`.
+    if "progress_reporter" not in kwargs:
+        from ray.tune import CLIReporter
+
+        kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"])
+
+    if "scheduler" in kwargs:
+        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
+
+        # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting.
+        if isinstance(
+            kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
+        ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == IntervalStrategy.NO):
+            raise RuntimeError(
+                "You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
+                "This means your trials will not report intermediate results to Ray Tune, and "
+                "can thus not be stopped early or used to exploit other trials' parameters. "
+                "If this is what you want, do not use {cls}. If you would like to use {cls}, "
+                "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the "
+                "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
+            )
+
+    trainable = ray.tune.with_parameters(_objective, local_trainer=trainer)
+
+    @functools.wraps(trainable)
+    def dynamic_modules_import_trainable(*args, **kwargs):
+        """
+        Wrapper around `tune.with_parameters` to ensure datasets_modules are loaded on each Actor.
+
+        Without this, an ImportError will be thrown. See https://github.com/huggingface/transformers/issues/11565.
+
+        Assumes that `_objective`, defined above, is a function. 
+ """ + if is_datasets_available(): + import datasets.load + + dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), "__init__.py") + # load dynamic_modules from path + spec = importlib.util.spec_from_file_location("datasets_modules", dynamic_modules_path) + datasets_modules = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = datasets_modules + spec.loader.exec_module(datasets_modules) + return trainable(*args, **kwargs) + + # special attr set by tune.with_parameters + if hasattr(trainable, "__mixins__"): + dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__ + + analysis = ray.tune.run( + dynamic_modules_import_trainable, + config=trainer.hp_space(None), + num_samples=n_trials, + **kwargs, + ) + best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3], scope=trainer.args.ray_scope) + best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config, analysis) + if _tb_writer is not None: + trainer.add_callback(_tb_writer) + return best_run + + +def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + import sigopt + + if trainer.args.process_index == 0: + if importlib.metadata.version("sigopt") >= "8.0.0": + sigopt.set_project("huggingface") + + experiment = sigopt.create_experiment( + name="huggingface-tune", + type="offline", + parameters=trainer.hp_space(None), + metrics=[{"name": "objective", "objective": direction, "strategy": "optimize"}], + parallel_bandwidth=1, + budget=n_trials, + ) + + logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + + for run in experiment.loop(): + with run: + trainer.objective = None + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(run.run) + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=None) + else: + trainer.train(resume_from_checkpoint=None, trial=run.run) + # If there hasn't been any evaluation during the training loop. 
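+                # (for example, when no evaluation ran during training), compute it from a final
+                # `evaluate()` call so it can be reported to SigOpt via `run.log_metric` below.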
+ if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + run.log_metric("objective", trainer.objective) + + best = list(experiment.get_best_runs())[0] + best_run = BestRun(best.id, best.values["objective"].value, best.assignments) + else: + from sigopt import Connection + + conn = Connection() + proxies = kwargs.pop("proxies", None) + if proxies is not None: + conn.set_proxies(proxies) + + experiment = conn.experiments().create( + name="huggingface-tune", + parameters=trainer.hp_space(None), + metrics=[{"name": "objective", "objective": direction, "strategy": "optimize"}], + parallel_bandwidth=1, + observation_budget=n_trials, + project="huggingface", + ) + logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + + while experiment.progress.observation_count < experiment.observation_budget: + suggestion = conn.experiments(experiment.id).suggestions().create() + trainer.objective = None + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(suggestion) + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=None) + else: + trainer.train(resume_from_checkpoint=None, trial=suggestion) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + + values = [{"name": "objective", "value": trainer.objective}] + obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values) + logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]") + experiment = conn.experiments(experiment.id).fetch() + + best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0] + best_run = BestRun(best.id, best.value, best.assignments) + return best_run + else: + for i in range(n_trials): + trainer.objective = None + args_main_rank = list(pickle.dumps(trainer.args)) + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(args_main_rank, src=0) + args = pickle.loads(bytes(args_main_rank)) + for key, value in asdict(args).items(): + if key != "local_rank": + setattr(trainer.args, key, value) + trainer.train(resume_from_checkpoint=None) + # If there hasn't been any evaluation during the training loop. 
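+            # Non-zero ranks run the same fallback evaluation so that every process participates in
+            # the same collective operations as rank 0 during distributed evaluation.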
+ if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return None + + +def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + from ..integrations import is_wandb_available + + if not is_wandb_available(): + raise ImportError("This function needs wandb installed: `pip install wandb`") + import wandb + + # add WandbCallback if not already added in trainer callbacks + reporting_to_wandb = False + for callback in trainer.callback_handler.callbacks: + if isinstance(callback, WandbCallback): + reporting_to_wandb = True + break + if not reporting_to_wandb: + trainer.add_callback(WandbCallback()) + trainer.args.report_to = ["wandb"] + best_trial = {"run_id": None, "objective": None, "hyperparameters": None} + sweep_id = kwargs.pop("sweep_id", None) + project = kwargs.pop("project", None) + name = kwargs.pop("name", None) + entity = kwargs.pop("entity", None) + metric = kwargs.pop("metric", "eval/loss") + + sweep_config = trainer.hp_space(None) + sweep_config["metric"]["goal"] = direction + sweep_config["metric"]["name"] = metric + if name: + sweep_config["name"] = name + + def _objective(): + run = wandb.run if wandb.run else wandb.init() + trainer.state.trial_name = run.name + run.config.update({"assignments": {}, "metric": metric}) + config = wandb.config + + trainer.objective = None + + trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"]) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + format_metrics = rewrite_logs(metrics) + if metric not in format_metrics: + logger.warning( + f"Provided metric {metric} not found. This might result in unexpected sweeps charts. 
The available" + f" metrics are {format_metrics.keys()}" + ) + best_score = False + if best_trial["run_id"] is not None: + if direction == "minimize": + best_score = trainer.objective < best_trial["objective"] + elif direction == "maximize": + best_score = trainer.objective > best_trial["objective"] + + if best_score or best_trial["run_id"] is None: + best_trial["run_id"] = run.id + best_trial["objective"] = trainer.objective + best_trial["hyperparameters"] = dict(config) + + return trainer.objective + + sweep_id = wandb.sweep(sweep_config, project=project, entity=entity) if not sweep_id else sweep_id + logger.info(f"wandb sweep id - {sweep_id}") + wandb.agent(sweep_id, function=_objective, count=n_trials) + + return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"]) + + +def get_available_reporting_integrations(): + integrations = [] + if is_azureml_available() and not is_mlflow_available(): + integrations.append("azure_ml") + if is_comet_available(): + integrations.append("comet_ml") + if is_dagshub_available(): + integrations.append("dagshub") + if is_dvclive_available(): + integrations.append("dvclive") + if is_mlflow_available(): + integrations.append("mlflow") + if is_neptune_available(): + integrations.append("neptune") + if is_tensorboard_available(): + integrations.append("tensorboard") + if is_wandb_available(): + integrations.append("wandb") + if is_codecarbon_available(): + integrations.append("codecarbon") + if is_clearml_available(): + integrations.append("clearml") + return integrations + + +def rewrite_logs(d): + new_d = {} + eval_prefix = "eval_" + eval_prefix_len = len(eval_prefix) + test_prefix = "test_" + test_prefix_len = len(test_prefix) + for k, v in d.items(): + if k.startswith(eval_prefix): + new_d["eval/" + k[eval_prefix_len:]] = v + elif k.startswith(test_prefix): + new_d["test/" + k[test_prefix_len:]] = v + else: + new_d["train/" + k] = v + return new_d + + +class TensorBoardCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard). + + Args: + tb_writer (`SummaryWriter`, *optional*): + The writer to use. Will instantiate one if not set. + """ + + def __init__(self, tb_writer=None): + has_tensorboard = is_tensorboard_available() + if not has_tensorboard: + raise RuntimeError( + "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or" + " install tensorboardX." 
+ ) + if has_tensorboard: + try: + from torch.utils.tensorboard import SummaryWriter # noqa: F401 + + self._SummaryWriter = SummaryWriter + except ImportError: + try: + from tensorboardX import SummaryWriter + + self._SummaryWriter = SummaryWriter + except ImportError: + self._SummaryWriter = None + else: + self._SummaryWriter = None + self.tb_writer = tb_writer + + def _init_summary_writer(self, args, log_dir=None): + log_dir = log_dir or args.logging_dir + if self._SummaryWriter is not None: + self.tb_writer = self._SummaryWriter(log_dir=log_dir) + + def on_train_begin(self, args, state, control, **kwargs): + if not state.is_world_process_zero: + return + + log_dir = None + + if state.is_hyper_param_search: + trial_name = state.trial_name + if trial_name is not None: + log_dir = os.path.join(args.logging_dir, trial_name) + + if self.tb_writer is None: + self._init_summary_writer(args, log_dir) + + if self.tb_writer is not None: + self.tb_writer.add_text("args", args.to_json_string()) + if "model" in kwargs: + model = kwargs["model"] + if hasattr(model, "config") and model.config is not None: + model_config_json = model.config.to_json_string() + self.tb_writer.add_text("model_config", model_config_json) + + def on_log(self, args, state, control, logs=None, **kwargs): + if not state.is_world_process_zero: + return + + if self.tb_writer is None: + self._init_summary_writer(args) + + if self.tb_writer is not None: + logs = rewrite_logs(logs) + for k, v in logs.items(): + if isinstance(v, (int, float)): + self.tb_writer.add_scalar(k, v, state.global_step) + else: + logger.warning( + "Trainer is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of Tensorboard's writer.add_scalar() " + "is incorrect so we dropped this attribute." + ) + self.tb_writer.flush() + + def on_train_end(self, args, state, control, **kwargs): + if self.tb_writer: + self.tb_writer.close() + self.tb_writer = None + + +class WandbCallback(TrainerCallback): + """ + A [`TrainerCallback`] that logs metrics, media, model checkpoints to [Weight and Biases](https://www.wandb.com/). + """ + + def __init__(self): + has_wandb = is_wandb_available() + if not has_wandb: + raise RuntimeError("WandbCallback requires wandb to be installed. Run `pip install wandb`.") + if has_wandb: + import wandb + + self._wandb = wandb + self._initialized = False + # log model + if os.getenv("WANDB_LOG_MODEL", "FALSE").upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"}): + DeprecationWarning( + f"Setting `WANDB_LOG_MODEL` as {os.getenv('WANDB_LOG_MODEL')} is deprecated and will be removed in " + "version 5 of transformers. Use one of `'end'` or `'checkpoint'` instead." + ) + logger.info(f"Setting `WANDB_LOG_MODEL` from {os.getenv('WANDB_LOG_MODEL')} to `end` instead") + self._log_model = "end" + else: + self._log_model = os.getenv("WANDB_LOG_MODEL", "false").lower() + + def setup(self, args, state, model, **kwargs): + """ + Setup the optional Weights & Biases (*wandb*) integration. + + One can subclass and override this method to customize the setup if needed. Find more information + [here](https://docs.wandb.ai/guides/integrations/huggingface). You can also override the following environment + variables: + + Environment: + - **WANDB_LOG_MODEL** (`str`, *optional*, defaults to `"false"`): + Whether to log model and checkpoints during training. Can be `"end"`, `"checkpoint"` or `"false"`. If set + to `"end"`, the model will be uploaded at the end of training. 
If set to `"checkpoint"`, the checkpoint + will be uploaded every `args.save_steps` . If set to `"false"`, the model will not be uploaded. Use along + with [`~transformers.TrainingArguments.load_best_model_at_end`] to upload best model. + + + + Setting `WANDB_LOG_MODEL` as `bool` will be deprecated in version 5 of 🤗 Transformers. + + + - **WANDB_WATCH** (`str`, *optional* defaults to `"false"`): + Can be `"gradients"`, `"all"`, `"parameters"`, or `"false"`. Set to `"all"` to log gradients and + parameters. + - **WANDB_PROJECT** (`str`, *optional*, defaults to `"huggingface"`): + Set this to a custom string to store results in a different project. + - **WANDB_DISABLED** (`bool`, *optional*, defaults to `False`): + Whether to disable wandb entirely. Set `WANDB_DISABLED=true` to disable. + """ + if self._wandb is None: + return + self._initialized = True + if state.is_world_process_zero: + logger.info( + 'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"' + ) + combined_dict = {**args.to_dict()} + + if hasattr(model, "config") and model.config is not None: + model_config = model.config.to_dict() + combined_dict = {**model_config, **combined_dict} + trial_name = state.trial_name + init_args = {} + if trial_name is not None: + init_args["name"] = trial_name + init_args["group"] = args.run_name + else: + if not (args.run_name is None or args.run_name == args.output_dir): + init_args["name"] = args.run_name + + if self._wandb.run is None: + self._wandb.init( + project=os.getenv("WANDB_PROJECT", "huggingface"), + **init_args, + ) + # add config parameters (run may have been created manually) + self._wandb.config.update(combined_dict, allow_val_change=True) + + # define default x-axis (for latest wandb versions) + if getattr(self._wandb, "define_metric", None): + self._wandb.define_metric("train/global_step") + self._wandb.define_metric("*", step_metric="train/global_step", step_sync=True) + + # keep track of model topology and gradients, unsupported on TPU + _watch_model = os.getenv("WANDB_WATCH", "false") + if not is_torch_tpu_available() and _watch_model in ("all", "parameters", "gradients"): + self._wandb.watch(model, log=_watch_model, log_freq=max(100, state.logging_steps)) + self._wandb.run._label(code="transformers_trainer") + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if self._wandb is None: + return + hp_search = state.is_hyper_param_search + if hp_search: + self._wandb.finish() + self._initialized = False + args.run_name = None + if not self._initialized: + self.setup(args, state, model, **kwargs) + + def on_train_end(self, args, state, control, model=None, tokenizer=None, **kwargs): + if self._wandb is None: + return + if self._log_model in ("end", "checkpoint") and self._initialized and state.is_world_process_zero: + from ..trainer import Trainer + + fake_trainer = Trainer(args=args, model=model, tokenizer=tokenizer) + with tempfile.TemporaryDirectory() as temp_dir: + fake_trainer.save_model(temp_dir) + metadata = ( + { + k: v + for k, v in dict(self._wandb.summary).items() + if isinstance(v, numbers.Number) and not k.startswith("_") + } + if not args.load_best_model_at_end + else { + f"eval/{args.metric_for_best_model}": state.best_metric, + "train/total_floss": state.total_flos, + } + ) + logger.info("Logging model artifacts. 
...") + model_name = ( + f"model-{self._wandb.run.id}" + if (args.run_name is None or args.run_name == args.output_dir) + else f"model-{self._wandb.run.name}" + ) + artifact = self._wandb.Artifact(name=model_name, type="model", metadata=metadata) + for f in Path(temp_dir).glob("*"): + if f.is_file(): + with artifact.new_file(f.name, mode="wb") as fa: + fa.write(f.read_bytes()) + self._wandb.run.log_artifact(artifact) + + def on_log(self, args, state, control, model=None, logs=None, **kwargs): + if self._wandb is None: + return + if not self._initialized: + self.setup(args, state, model) + if state.is_world_process_zero: + logs = rewrite_logs(logs) + self._wandb.log({**logs, "train/global_step": state.global_step}) + + def on_save(self, args, state, control, **kwargs): + if self._log_model == "checkpoint" and self._initialized and state.is_world_process_zero: + checkpoint_metadata = { + k: v + for k, v in dict(self._wandb.summary).items() + if isinstance(v, numbers.Number) and not k.startswith("_") + } + + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. ...") + checkpoint_name = ( + f"checkpoint-{self._wandb.run.id}" + if (args.run_name is None or args.run_name == args.output_dir) + else f"checkpoint-{self._wandb.run.name}" + ) + artifact = self._wandb.Artifact(name=checkpoint_name, type="model", metadata=checkpoint_metadata) + artifact.add_dir(artifact_path) + self._wandb.log_artifact(artifact, aliases=[f"checkpoint-{state.global_step}"]) + + +class CometCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [Comet ML](https://www.comet.ml/site/). + """ + + def __init__(self): + if not _has_comet: + raise RuntimeError("CometCallback requires comet-ml to be installed. Run `pip install comet-ml`.") + self._initialized = False + self._log_assets = False + + def setup(self, args, state, model): + """ + Setup the optional Comet.ml integration. + + Environment: + - **COMET_MODE** (`str`, *optional*, defaults to `ONLINE`): + Whether to create an online, offline experiment or disable Comet logging. Can be `OFFLINE`, `ONLINE`, or + `DISABLED`. + - **COMET_PROJECT_NAME** (`str`, *optional*): + Comet project name for experiments. + - **COMET_OFFLINE_DIRECTORY** (`str`, *optional*): + Folder to use for saving offline experiments when `COMET_MODE` is `OFFLINE`. + - **COMET_LOG_ASSETS** (`str`, *optional*, defaults to `TRUE`): + Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be `TRUE`, or + `FALSE`. + + For a number of configurable items in the environment, see + [here](https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables). 
+ """ + self._initialized = True + log_assets = os.getenv("COMET_LOG_ASSETS", "FALSE").upper() + if log_assets in {"TRUE", "1"}: + self._log_assets = True + if state.is_world_process_zero: + comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() + experiment = None + experiment_kwargs = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} + if comet_mode == "ONLINE": + experiment = comet_ml.Experiment(**experiment_kwargs) + experiment.log_other("Created from", "transformers") + logger.info("Automatic Comet.ml online logging enabled") + elif comet_mode == "OFFLINE": + experiment_kwargs["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") + experiment = comet_ml.OfflineExperiment(**experiment_kwargs) + experiment.log_other("Created from", "transformers") + logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") + if experiment is not None: + experiment._set_model_graph(model, framework="transformers") + experiment._log_parameters(args, prefix="args/", framework="transformers") + if hasattr(model, "config"): + experiment._log_parameters(model.config, prefix="config/", framework="transformers") + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + + def on_log(self, args, state, control, model=None, logs=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + if state.is_world_process_zero: + experiment = comet_ml.config.get_global_experiment() + if experiment is not None: + experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework="transformers") + + def on_train_end(self, args, state, control, **kwargs): + if self._initialized and state.is_world_process_zero: + experiment = comet_ml.config.get_global_experiment() + if experiment is not None: + if self._log_assets is True: + logger.info("Logging checkpoints. This may take time.") + experiment.log_asset_folder( + args.output_dir, recursive=True, log_file_name=True, step=state.global_step + ) + experiment.end() + + +class AzureMLCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [AzureML](https://pypi.org/project/azureml-sdk/). + """ + + def __init__(self, azureml_run=None): + if not is_azureml_available(): + raise RuntimeError("AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.") + self.azureml_run = azureml_run + + def on_init_end(self, args, state, control, **kwargs): + from azureml.core.run import Run + + if self.azureml_run is None and state.is_world_process_zero: + self.azureml_run = Run.get_context() + + def on_log(self, args, state, control, logs=None, **kwargs): + if self.azureml_run and state.is_world_process_zero: + for k, v in logs.items(): + if isinstance(v, (int, float)): + self.azureml_run.log(k, v, description=k) + + +class MLflowCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [MLflow](https://www.mlflow.org/). Can be disabled by setting + environment variable `DISABLE_MLFLOW_INTEGRATION = TRUE`. + """ + + def __init__(self): + if not is_mlflow_available(): + raise RuntimeError("MLflowCallback requires mlflow to be installed. 
Run `pip install mlflow`.")
+        import mlflow
+
+        self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH
+        self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH
+
+        self._initialized = False
+        self._auto_end_run = False
+        self._log_artifacts = False
+        self._ml_flow = mlflow
+
+    def setup(self, args, state, model):
+        """
+        Setup the optional MLflow integration.
+
+        Environment:
+        - **HF_MLFLOW_LOG_ARTIFACTS** (`str`, *optional*):
+            Whether to use MLflow `.log_artifact()` facility to log artifacts. This only makes sense if logging to a
+            remote server, e.g. s3 or GCS. If set to `True` or *1*, will copy each saved checkpoint on each save in
+            [`TrainingArguments`]'s `output_dir` to the local or remote artifact storage. Using it without a remote
+            storage will just copy the files to your artifact location.
+        - **MLFLOW_EXPERIMENT_NAME** (`str`, *optional*, defaults to `None`):
+            The name of the MLflow experiment under which to launch the run. Defaults to `None`, which points to the
+            `Default` experiment in MLflow. Otherwise, it is a case-sensitive name of the experiment to be activated.
+            If an experiment with this name does not exist, a new experiment with this name is created.
+        - **MLFLOW_TAGS** (`str`, *optional*):
+            A string dump of a dictionary of key/value pairs to be added to the MLflow run as tags. Example:
+            `os.environ['MLFLOW_TAGS']='{"release.candidate": "RC1", "release.version": "2.2.0"}'`.
+        - **MLFLOW_NESTED_RUN** (`str`, *optional*):
+            Whether to use MLflow nested runs. If set to `True` or *1*, will create a nested run inside the current
+            run.
+        - **MLFLOW_RUN_ID** (`str`, *optional*):
+            Allows reattaching to an existing run, which can be useful when resuming training from a checkpoint. When
+            the `MLFLOW_RUN_ID` environment variable is set, `start_run` attempts to resume a run with the specified
+            run ID and other parameters are ignored.
+        - **MLFLOW_FLATTEN_PARAMS** (`str`, *optional*, defaults to `False`):
+            Whether to flatten the parameters dictionary before logging.
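+
+        A minimal sketch of driving this integration via the environment (illustrative values; the
+        `MLFLOW_TAGS` string is the example reused from above):
+
+        ```python
+        import os
+
+        os.environ["HF_MLFLOW_LOG_ARTIFACTS"] = "1"
+        os.environ["MLFLOW_EXPERIMENT_NAME"] = "my-experiment"
+        os.environ["MLFLOW_FLATTEN_PARAMS"] = "TRUE"
+        os.environ["MLFLOW_TAGS"] = '{"release.candidate": "RC1", "release.version": "2.2.0"}'
+        ```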
+        """
+        self._log_artifacts = os.getenv("HF_MLFLOW_LOG_ARTIFACTS", "FALSE").upper() in ENV_VARS_TRUE_VALUES
+        self._nested_run = os.getenv("MLFLOW_NESTED_RUN", "FALSE").upper() in ENV_VARS_TRUE_VALUES
+        self._experiment_name = os.getenv("MLFLOW_EXPERIMENT_NAME", None)
+        self._flatten_params = os.getenv("MLFLOW_FLATTEN_PARAMS", "FALSE").upper() in ENV_VARS_TRUE_VALUES
+        self._run_id = os.getenv("MLFLOW_RUN_ID", None)
+        logger.debug(
+            f"MLflow experiment_name={self._experiment_name}, run_name={args.run_name}, nested={self._nested_run},"
+            f" tags={os.getenv('MLFLOW_TAGS', None)}"
+        )
+        if state.is_world_process_zero:
+            if self._ml_flow.active_run() is None or self._nested_run or self._run_id:
+                if self._experiment_name:
+                    # Use of set_experiment() ensures that the experiment is created if it does not exist
+                    self._ml_flow.set_experiment(self._experiment_name)
+                self._ml_flow.start_run(run_name=args.run_name, nested=self._nested_run)
+                logger.debug(f"MLflow run started with run_id={self._ml_flow.active_run().info.run_id}")
+                self._auto_end_run = True
+            combined_dict = args.to_dict()
+            if hasattr(model, "config") and model.config is not None:
+                model_config = model.config.to_dict()
+                combined_dict = {**model_config, **combined_dict}
+            combined_dict = flatten_dict(combined_dict) if self._flatten_params else combined_dict
+            # remove params that are too long for MLflow
+            for name, value in list(combined_dict.items()):
+                # internally, all values are converted to str in MLflow
+                if len(str(value)) > self._MAX_PARAM_VAL_LENGTH:
+                    logger.warning(
+                        f'Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s'
+                        " log_param() only accepts values no longer than 250 characters so we dropped this attribute."
+                        " You can use `MLFLOW_FLATTEN_PARAMS` environment variable to flatten the parameters and"
+                        " avoid this message."
+                    )
+                    del combined_dict[name]
+            # MLflow cannot log more than 100 values in one go, so we have to split it
+            combined_dict_items = list(combined_dict.items())
+            for i in range(0, len(combined_dict_items), self._MAX_PARAMS_TAGS_PER_BATCH):
+                self._ml_flow.log_params(dict(combined_dict_items[i : i + self._MAX_PARAMS_TAGS_PER_BATCH]))
+            mlflow_tags = os.getenv("MLFLOW_TAGS", None)
+            if mlflow_tags:
+                mlflow_tags = json.loads(mlflow_tags)
+                self._ml_flow.set_tags(mlflow_tags)
+        self._initialized = True
+
+    def on_train_begin(self, args, state, control, model=None, **kwargs):
+        if not self._initialized:
+            self.setup(args, state, model)
+
+    def on_log(self, args, state, control, logs, model=None, **kwargs):
+        if not self._initialized:
+            self.setup(args, state, model)
+        if state.is_world_process_zero:
+            metrics = {}
+            for k, v in logs.items():
+                if isinstance(v, (int, float)):
+                    metrics[k] = v
+                else:
+                    logger.warning(
+                        f'Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
+                        "MLflow's log_metric() only accepts float and int types so we dropped this attribute."
+                    )
+            self._ml_flow.log_metrics(metrics=metrics, step=state.global_step)
+
+    def on_train_end(self, args, state, control, **kwargs):
+        if self._initialized and state.is_world_process_zero:
+            if self._auto_end_run and self._ml_flow.active_run():
+                self._ml_flow.end_run()
+
+    def on_save(self, args, state, control, **kwargs):
+        if self._initialized and state.is_world_process_zero and self._log_artifacts:
+            ckpt_dir = f"checkpoint-{state.global_step}"
+            artifact_path = os.path.join(args.output_dir, ckpt_dir)
+            logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. 
This may take time.")
+            self._ml_flow.pyfunc.log_model(
+                ckpt_dir,
+                artifacts={"model_path": artifact_path},
+                python_model=self._ml_flow.pyfunc.PythonModel(),
+            )
+
+    def __del__(self):
+        # if the previous run is not terminated correctly, the fluent API will
+        # not let you start a new run before the previous one is killed
+        if (
+            self._auto_end_run
+            and callable(getattr(self._ml_flow, "active_run", None))
+            and self._ml_flow.active_run() is not None
+        ):
+            self._ml_flow.end_run()
+
+
+class DagsHubCallback(MLflowCallback):
+    """
+    A [`TrainerCallback`] that logs to [DagsHub](https://dagshub.com/). Extends [`MLflowCallback`]
+    """
+
+    def __init__(self):
+        super().__init__()
+        if not is_dagshub_available():
+            raise ImportError("DagsHubCallback requires dagshub to be installed. Run `pip install dagshub`.")
+
+        from dagshub.upload import Repo
+
+        self.Repo = Repo
+
+    def setup(self, *args, **kwargs):
+        """
+        Setup the DagsHub's Logging integration.
+
+        Environment:
+        - **HF_DAGSHUB_LOG_ARTIFACTS** (`str`, *optional*):
+            Whether to save the data and model artifacts for the experiment. Defaults to `False`.
+        """
+
+        self.log_artifacts = os.getenv("HF_DAGSHUB_LOG_ARTIFACTS", "FALSE").upper() in ENV_VARS_TRUE_VALUES
+        self.name = os.getenv("HF_DAGSHUB_MODEL_NAME") or "main"
+        self.remote = os.getenv("MLFLOW_TRACKING_URI")
+        # Validate the tracking URI before deriving the repo owner/name from it below.
+        if self.remote is None:
+            raise RuntimeError(
+                "DagsHubCallback requires the `MLFLOW_TRACKING_URI` environment variable to be set. Did you run"
+                " `dagshub.init()`?"
+            )
+        self.repo = self.Repo(
+            owner=self.remote.split(os.sep)[-2],
+            name=self.remote.split(os.sep)[-1].split(".")[0],
+            branch=os.getenv("BRANCH") or "main",
+        )
+        self.path = Path("artifacts")
+
+        super().setup(*args, **kwargs)
+
+    def on_train_end(self, args, state, control, **kwargs):
+        if self.log_artifacts:
+            if getattr(self, "train_dataloader", None):
+                torch.save(self.train_dataloader.dataset, os.path.join(args.output_dir, "dataset.pt"))
+
+            self.repo.directory(str(self.path)).add_dir(args.output_dir)
+
+
+class NeptuneMissingConfiguration(Exception):
+    def __init__(self):
+        super().__init__(
+            """
+            ------ Unsupported ---- We were not able to create new runs. You provided a custom Neptune run to
+            `NeptuneCallback` with the `run` argument. For the integration to work fully, provide your `api_token` and
+            `project` by saving them as environment variables or passing them to the callback.
+            """
+        )
+
+
+class NeptuneCallback(TrainerCallback):
+    """TrainerCallback that sends the logs to [Neptune](https://app.neptune.ai).
+
+    Args:
+        api_token (`str`, *optional*): Neptune API token obtained upon registration.
+            You can leave this argument out if you have saved your token to the `NEPTUNE_API_TOKEN` environment
+            variable (strongly recommended). See full setup instructions in the
+            [docs](https://docs.neptune.ai/setup/installation).
+        project (`str`, *optional*): Name of an existing Neptune project, in the form "workspace-name/project-name".
+            You can find and copy the name in Neptune from the project settings -> Properties. If None (default), the
+            value of the `NEPTUNE_PROJECT` environment variable is used.
+        name (`str`, *optional*): Custom name for the run.
+        base_namespace (`str`, *optional*, defaults to "finetuning"): In the Neptune run, the root namespace
+            that will contain all of the metadata logged by the callback.
+        log_parameters (`bool`, *optional*, defaults to `True`):
+            If True, logs all Trainer arguments and model parameters provided by the Trainer. 
+ log_checkpoints (`str`, *optional*): If "same", uploads checkpoints whenever they are saved by the Trainer. + If "last", uploads only the most recently saved checkpoint. If "best", uploads the best checkpoint (among + the ones saved by the Trainer). If `None`, does not upload checkpoints. + run (`Run`, *optional*): Pass a Neptune run object if you want to continue logging to an existing run. + Read more about resuming runs in the [docs](https://docs.neptune.ai/logging/to_existing_object). + **neptune_run_kwargs (*optional*): + Additional keyword arguments to be passed directly to the + [`neptune.init_run()`](https://docs.neptune.ai/api/neptune#init_run) function when a new run is created. + + For instructions and examples, see the [Transformers integration + guide](https://docs.neptune.ai/integrations/transformers) in the Neptune documentation. + """ + + integration_version_key = "source_code/integrations/transformers" + model_parameters_key = "model_parameters" + trial_name_key = "trial" + trial_params_key = "trial_params" + trainer_parameters_key = "trainer_parameters" + flat_metrics = {"train/epoch"} + + def __init__( + self, + *, + api_token: Optional[str] = None, + project: Optional[str] = None, + name: Optional[str] = None, + base_namespace: str = "finetuning", + run=None, + log_parameters: bool = True, + log_checkpoints: Optional[str] = None, + **neptune_run_kwargs, + ): + if not is_neptune_available(): + raise ValueError( + "NeptuneCallback requires the Neptune client library to be installed. " + "To install the library, run `pip install neptune`." + ) + + try: + from neptune import Run + from neptune.internal.utils import verify_type + except ImportError: + from neptune.new.internal.utils import verify_type + from neptune.new.metadata_containers.run import Run + + verify_type("api_token", api_token, (str, type(None))) + verify_type("project", project, (str, type(None))) + verify_type("name", name, (str, type(None))) + verify_type("base_namespace", base_namespace, str) + verify_type("run", run, (Run, type(None))) + verify_type("log_parameters", log_parameters, bool) + verify_type("log_checkpoints", log_checkpoints, (str, type(None))) + + self._base_namespace_path = base_namespace + self._log_parameters = log_parameters + self._log_checkpoints = log_checkpoints + self._initial_run: Optional[Run] = run + + self._run = None + self._is_monitoring_run = False + self._run_id = None + self._force_reset_monitoring_run = False + self._init_run_kwargs = {"api_token": api_token, "project": project, "name": name, **neptune_run_kwargs} + + self._volatile_checkpoints_dir = None + self._should_upload_checkpoint = self._log_checkpoints is not None + self._recent_checkpoint_path = None + + if self._log_checkpoints in {"last", "best"}: + self._target_checkpoints_namespace = f"checkpoints/{self._log_checkpoints}" + self._should_clean_recently_uploaded_checkpoint = True + else: + self._target_checkpoints_namespace = "checkpoints" + self._should_clean_recently_uploaded_checkpoint = False + + def _stop_run_if_exists(self): + if self._run: + self._run.stop() + del self._run + self._run = None + + def _initialize_run(self, **additional_neptune_kwargs): + try: + from neptune import init_run + from neptune.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException + except ImportError: + from neptune.new import init_run + from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException + + self._stop_run_if_exists() + + try: + self._run = 
init_run(**self._init_run_kwargs, **additional_neptune_kwargs)
+            self._run_id = self._run["sys/id"].fetch()
+        except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e:
+            raise NeptuneMissingConfiguration() from e
+
+    def _use_initial_run(self):
+        self._run = self._initial_run
+        self._is_monitoring_run = True
+        self._run_id = self._run["sys/id"].fetch()
+        self._initial_run = None
+
+    def _ensure_run_with_monitoring(self):
+        if self._initial_run is not None:
+            self._use_initial_run()
+        else:
+            if not self._force_reset_monitoring_run and self._is_monitoring_run:
+                return
+
+            if self._run and not self._is_monitoring_run and not self._force_reset_monitoring_run:
+                self._initialize_run(with_id=self._run_id)
+                self._is_monitoring_run = True
+            else:
+                self._initialize_run()
+                self._force_reset_monitoring_run = False
+
+    def _ensure_at_least_run_without_monitoring(self):
+        if self._initial_run is not None:
+            self._use_initial_run()
+        else:
+            if not self._run:
+                self._initialize_run(
+                    with_id=self._run_id,
+                    capture_stdout=False,
+                    capture_stderr=False,
+                    capture_hardware_metrics=False,
+                    capture_traceback=False,
+                )
+                self._is_monitoring_run = False
+
+    @property
+    def run(self):
+        if self._run is None:
+            self._ensure_at_least_run_without_monitoring()
+        return self._run
+
+    @property
+    def _metadata_namespace(self):
+        return self.run[self._base_namespace_path]
+
+    def _log_integration_version(self):
+        self.run[NeptuneCallback.integration_version_key] = version
+
+    def _log_trainer_parameters(self, args):
+        self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict()
+
+    def _log_model_parameters(self, model):
+        from neptune.utils import stringify_unsupported
+
+        if model and hasattr(model, "config") and model.config is not None:
+            self._metadata_namespace[NeptuneCallback.model_parameters_key] = stringify_unsupported(
+                model.config.to_dict()
+            )
+
+    def _log_hyper_param_search_parameters(self, state):
+        if state and hasattr(state, "trial_name"):
+            self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name
+
+        if state and hasattr(state, "trial_params") and state.trial_params is not None:
+            self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params
+
+    def _log_model_checkpoint(self, source_directory: str, checkpoint: str):
+        target_path = relative_path = os.path.join(source_directory, checkpoint)
+
+        if self._volatile_checkpoints_dir is not None:
+            consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint)
+            try:
+                # Remove leading ../ from a relative path.
+                ckpt_path = relative_path.replace("..", "").lstrip(os.path.sep)
+                copy_path = os.path.join(consistent_checkpoint_path, ckpt_path)
+                shutil.copytree(relative_path, copy_path)
+                target_path = consistent_checkpoint_path
+            except IOError as e:
+                logger.warning(
+                    "NeptuneCallback was unable to make a copy of checkpoint due to I/O exception: '{}'. 
" + "Could fail trying to upload.".format(e) + ) + + self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path) + + if self._should_clean_recently_uploaded_checkpoint and self._recent_checkpoint_path is not None: + self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path) + + self._recent_checkpoint_path = relative_path + + def on_init_end(self, args, state, control, **kwargs): + self._volatile_checkpoints_dir = None + if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): + self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name + + if self._log_checkpoints == "best" and not args.load_best_model_at_end: + raise ValueError("To save the best model checkpoint, the load_best_model_at_end argument must be enabled.") + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if not state.is_world_process_zero: + return + + self._ensure_run_with_monitoring() + self._force_reset_monitoring_run = True + + self._log_integration_version() + if self._log_parameters: + self._log_trainer_parameters(args) + self._log_model_parameters(model) + + if state.is_hyper_param_search: + self._log_hyper_param_search_parameters(state) + + def on_train_end(self, args, state, control, **kwargs): + self._stop_run_if_exists() + + def __del__(self): + if self._volatile_checkpoints_dir is not None: + shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True) + + self._stop_run_if_exists() + + def on_save(self, args, state, control, **kwargs): + if self._should_upload_checkpoint: + self._log_model_checkpoint(args.output_dir, f"checkpoint-{state.global_step}") + + def on_evaluate(self, args, state, control, metrics=None, **kwargs): + if self._log_checkpoints == "best": + best_metric_name = args.metric_for_best_model + if not best_metric_name.startswith("eval_"): + best_metric_name = f"eval_{best_metric_name}" + + metric_value = metrics.get(best_metric_name) + + operator = np.greater if args.greater_is_better else np.less + + self._should_upload_checkpoint = state.best_metric is None or operator(metric_value, state.best_metric) + + @classmethod + def get_run(cls, trainer): + for callback in trainer.callback_handler.callbacks: + if isinstance(callback, cls): + return callback.run + + raise Exception("The trainer doesn't have a NeptuneCallback configured.") + + def on_log(self, args, state, control, logs: Optional[Dict[str, float]] = None, **kwargs): + if not state.is_world_process_zero: + return + + if logs is not None: + for name, value in rewrite_logs(logs).items(): + if isinstance(value, (int, float)): + if name in NeptuneCallback.flat_metrics: + self._metadata_namespace[name] = value + else: + self._metadata_namespace[name].log(value, step=state.global_step) + + +class CodeCarbonCallback(TrainerCallback): + """ + A [`TrainerCallback`] that tracks the CO2 emission of training. + """ + + def __init__(self): + if not is_codecarbon_available(): + raise RuntimeError( + "CodeCarbonCallback requires `codecarbon` to be installed. Run `pip install codecarbon`." 
+ ) + import codecarbon + + self._codecarbon = codecarbon + self.tracker = None + + def on_init_end(self, args, state, control, **kwargs): + if self.tracker is None and state.is_local_process_zero: + # CodeCarbon will automatically handle environment variables for configuration + self.tracker = self._codecarbon.EmissionsTracker(output_dir=args.output_dir) + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if self.tracker and state.is_local_process_zero: + self.tracker.start() + + def on_train_end(self, args, state, control, **kwargs): + if self.tracker and state.is_local_process_zero: + self.tracker.stop() + + +class ClearMLCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [ClearML](https://clear.ml/). + + Environment: + - **CLEARML_PROJECT** (`str`, *optional*, defaults to `HuggingFace Transformers`): + ClearML project name. + - **CLEARML_TASK** (`str`, *optional*, defaults to `Trainer`): + ClearML task name. + - **CLEARML_LOG_MODEL** (`bool`, *optional*, defaults to `False`): + Whether to log models as artifacts during training. + """ + + def __init__(self): + if is_clearml_available(): + import clearml + + self._clearml = clearml + else: + raise RuntimeError("ClearMLCallback requires 'clearml' to be installed. Run `pip install clearml`.") + + self._initialized = False + self._initialized_externally = False + self._clearml_task = None + + self._log_model = os.getenv("CLEARML_LOG_MODEL", "FALSE").upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"}) + + def setup(self, args, state, model, tokenizer, **kwargs): + if self._clearml is None: + return + if self._initialized: + return + if state.is_world_process_zero: + logger.info("Automatic ClearML logging enabled.") + if self._clearml_task is None: + # This might happen when running inside of a pipeline, where the task is already initialized + # from outside of Hugging Face + if self._clearml.Task.current_task(): + self._clearml_task = self._clearml.Task.current_task() + self._initialized = True + self._initialized_externally = True + logger.info("External ClearML Task has been connected.") + else: + self._clearml_task = self._clearml.Task.init( + project_name=os.getenv("CLEARML_PROJECT", "HuggingFace Transformers"), + task_name=os.getenv("CLEARML_TASK", "Trainer"), + auto_connect_frameworks={"tensorboard": False, "pytorch": False}, + output_uri=True, + ) + self._initialized = True + logger.info("ClearML Task has been initialized.") + + self._clearml_task.connect(args, "Args") + if hasattr(model, "config") and model.config is not None: + self._clearml_task.connect(model.config, "Model Configuration") + + def on_train_begin(self, args, state, control, model=None, tokenizer=None, **kwargs): + if self._clearml is None: + return + if state.is_hyper_param_search: + self._initialized = False + if not self._initialized: + self.setup(args, state, model, tokenizer, **kwargs) + + def on_train_end(self, args, state, control, model=None, tokenizer=None, metrics=None, logs=None, **kwargs): + if self._clearml is None: + return + if self._clearml_task and state.is_world_process_zero and not self._initialized_externally: + # Close ClearML Task at the end end of training + self._clearml_task.close() + + def on_log(self, args, state, control, model=None, tokenizer=None, logs=None, **kwargs): + if self._clearml is None: + return + if not self._initialized: + self.setup(args, state, model, tokenizer, **kwargs) + if state.is_world_process_zero: + eval_prefix = "eval_" + eval_prefix_len = len(eval_prefix) + 
test_prefix = "test_" + test_prefix_len = len(test_prefix) + single_value_scalars = [ + "train_runtime", + "train_samples_per_second", + "train_steps_per_second", + "train_loss", + "total_flos", + "epoch", + ] + for k, v in logs.items(): + if isinstance(v, (int, float)): + if k in single_value_scalars: + self._clearml_task.get_logger().report_single_value(name=k, value=v) + elif k.startswith(eval_prefix): + self._clearml_task.get_logger().report_scalar( + title=k[eval_prefix_len:], series="eval", value=v, iteration=state.global_step + ) + elif k.startswith(test_prefix): + self._clearml_task.get_logger().report_scalar( + title=k[test_prefix_len:], series="test", value=v, iteration=state.global_step + ) + else: + self._clearml_task.get_logger().report_scalar( + title=k, series="train", value=v, iteration=state.global_step + ) + else: + logger.warning( + "Trainer is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of ClearML logger's report_scalar() " + "is incorrect so we dropped this attribute." + ) + + def on_save(self, args, state, control, **kwargs): + if self._log_model and self._clearml_task and state.is_world_process_zero: + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. This may take time.") + self._clearml_task.update_output_model(artifact_path, iteration=state.global_step, auto_delete_file=False) + + +class FlyteCallback(TrainerCallback): + """A [`TrainerCallback`] that sends the logs to [Flyte](https://flyte.org/). + NOTE: This callback only works within a Flyte task. + + Args: + save_log_history (`bool`, *optional*, defaults to `True`): + When set to True, the training logs are saved as a Flyte Deck. + + sync_checkpoints (`bool`, *optional*, defaults to `True`): + When set to True, checkpoints are synced with Flyte and can be used to resume training in the case of an + interruption. + + Example: + + ```python + # Note: This example skips over some setup steps for brevity. + from flytekit import current_context, task + + + @task + def train_hf_transformer(): + cp = current_context().checkpoint + trainer = Trainer(..., callbacks=[FlyteCallback()]) + output = trainer.train(resume_from_checkpoint=cp.restore()) + ``` + """ + + def __init__(self, save_log_history: bool = True, sync_checkpoints: bool = True): + super().__init__() + if not is_flytekit_available(): + raise ImportError("FlyteCallback requires flytekit to be installed. Run `pip install flytekit`.") + + if not is_flyte_deck_standard_available() or not is_pandas_available(): + logger.warning( + "Syncing log history requires both flytekitplugins-deck-standard and pandas to be installed. " + "Run `pip install flytekitplugins-deck-standard pandas` to enable this feature." + ) + save_log_history = False + + from flytekit import current_context + + self.cp = current_context().checkpoint + self.save_log_history = save_log_history + self.sync_checkpoints = sync_checkpoints + + def on_save(self, args, state, control, **kwargs): + if self.sync_checkpoints and state.is_world_process_zero: + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + + logger.info(f"Syncing checkpoint in {ckpt_dir} to Flyte. 
This may take time.")
+            self.cp.save(artifact_path)
+
+    def on_train_end(self, args, state, control, **kwargs):
+        if self.save_log_history:
+            import pandas as pd
+            from flytekit import Deck
+            from flytekitplugins.deck.renderer import TableRenderer
+
+            log_history_df = pd.DataFrame(state.log_history)
+            Deck("Log History", TableRenderer().to_html(log_history_df))
+
+
+class DVCLiveCallback(TrainerCallback):
+    """
+    A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive).
+
+    Use the environment variables below in `setup` to configure the integration. To customize this callback beyond
+    those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface).
+
+    Args:
+        live (`dvclive.Live`, *optional*, defaults to `None`):
+            Optional Live instance. If None, a new instance will be created using **kwargs.
+        log_model (Union[Literal["all"], bool], *optional*, defaults to `None`):
+            Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`,
+            the final checkpoint is logged at the end of training. If set to `"all"`, the entire
+            [`TrainingArguments`]'s `output_dir` is logged at each checkpoint.
+    """
+
+    def __init__(
+        self,
+        live: Optional[Any] = None,
+        log_model: Optional[Union[Literal["all"], bool]] = None,
+        **kwargs,
+    ):
+        if not is_dvclive_available():
+            raise RuntimeError("DVCLiveCallback requires dvclive to be installed. Run `pip install dvclive`.")
+        from dvclive import Live
+
+        self._log_model = log_model
+
+        self._initialized = False
+        self.live = None
+        if isinstance(live, Live):
+            self.live = live
+            self._initialized = True
+        elif live is not None:
+            raise RuntimeError(f"Found class {live.__class__} for live, expected dvclive.Live")
+
+    def setup(self, args, state, model):
+        """
+        Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see
+        [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface).
+
+        Environment:
+        - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*):
+            Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or
+            *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire
+            [`TrainingArguments`]'s `output_dir` is logged at each checkpoint.
+        """
+        from dvclive import Live
+
+        self._initialized = True
+        # Fall back to the environment variable only when `log_model` was not passed explicitly, and
+        # default to "FALSE" so that the lookup never returns None.
+        if self._log_model is None:
+            log_model_env = os.getenv("HF_DVCLIVE_LOG_MODEL", "FALSE")
+            if log_model_env.upper() in ENV_VARS_TRUE_VALUES:
+                self._log_model = True
+            elif log_model_env.lower() == "all":
+                self._log_model = "all"
+        if state.is_world_process_zero:
+            if not self.live:
+                self.live = Live()
+            self.live.log_params(args.to_dict())
+
+    def on_train_begin(self, args, state, control, model=None, **kwargs):
+        if not self._initialized:
+            self.setup(args, state, model)
+
+    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
+        if not self._initialized:
+            self.setup(args, state, model)
+        if state.is_world_process_zero:
+            from dvclive.plots import Metric
+            from dvclive.utils import standardize_metric_name
+
+            for key, value in logs.items():
+                if Metric.could_log(value):
+                    self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value)
+                else:
+                    logger.warning(
+                        "Trainer is attempting to log a value of "
+                        f'"{value}" of type {type(value)} for key "{key}" as a scalar. '
+                        "This invocation of DVCLive's Live.log_metric() "
+                        "is incorrect so we dropped this attribute."
+ ) + self.live.next_step() + + def on_save(self, args, state, control, **kwargs): + if self._log_model == "all" and self._initialized and state.is_world_process_zero: + self.live.log_artifact(args.output_dir) + + def on_train_end(self, args, state, control, **kwargs): + if self._initialized and state.is_world_process_zero: + from transformers.trainer import Trainer + + if self._log_model is True: + fake_trainer = Trainer(args=args, model=kwargs.get("model"), tokenizer=kwargs.get("tokenizer")) + name = "best" if args.load_best_model_at_end else "last" + output_dir = os.path.join(args.output_dir, name) + fake_trainer.save_model(output_dir) + self.live.log_artifact(output_dir, name=name, type="model", copy=True) + self.live.end() + + +INTEGRATION_TO_CALLBACK = { + "azure_ml": AzureMLCallback, + "comet_ml": CometCallback, + "mlflow": MLflowCallback, + "neptune": NeptuneCallback, + "tensorboard": TensorBoardCallback, + "wandb": WandbCallback, + "codecarbon": CodeCarbonCallback, + "clearml": ClearMLCallback, + "dagshub": DagsHubCallback, + "flyte": FlyteCallback, + "dvclive": DVCLiveCallback, +} + + +def get_reporting_integration_callbacks(report_to): + for integration in report_to: + if integration not in INTEGRATION_TO_CALLBACK: + raise ValueError( + f"{integration} is not supported, only {', '.join(INTEGRATION_TO_CALLBACK.keys())} are supported." + ) + + return [INTEGRATION_TO_CALLBACK[integration] for integration in report_to] diff --git a/modified/integrations/peft.py b/modified/integrations/peft.py new file mode 100644 index 0000000000000000000000000000000000000000..e04d2399527c1b4a0ad9556751aff6da4ab13ec1 --- /dev/null +++ b/modified/integrations/peft.py @@ -0,0 +1,476 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +import warnings +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from ..utils import ( + check_peft_version, + find_adapter_config_file, + is_accelerate_available, + is_peft_available, + is_torch_available, + logging, +) + + +if is_accelerate_available(): + from accelerate import dispatch_model + from accelerate.utils import get_balanced_memory, infer_auto_device_map + +# Minimum PEFT version supported for the integration +MIN_PEFT_VERSION = "0.5.0" + +if TYPE_CHECKING: + if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class PeftAdapterMixin: + """ + A class containing all functions for loading and using adapters weights that are supported in PEFT library. For + more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT + library: https://huggingface.co/docs/peft/index + + Currently supported PEFT methods are all non-prefix tuning methods. 
Below is the list of supported PEFT methods
+    that anyone can load, train and run with this mixin class:
+    - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora
+    - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3
+    - AdaLora: https://arxiv.org/abs/2303.10512
+
+    Other PEFT models such as prompt tuning or prompt learning are out of scope, as these adapters are not
+    "injectable" into a torch module. To use those methods, please refer to the usage guide of the PEFT library.
+
+    With this mixin, if the correct PEFT version is installed, it is possible to:
+
+    - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model
+    - Attach new adapters in the model and train them with Trainer or on your own.
+    - Attach multiple adapters and iteratively activate / deactivate them
+    - Activate / deactivate all adapters from the model.
+    - Get the `state_dict` of the active adapter.
+    """
+
+    _hf_peft_config_loaded = False
+
+    def load_adapter(
+        self,
+        peft_model_id: Optional[str] = None,
+        adapter_name: Optional[str] = None,
+        revision: Optional[str] = None,
+        token: Optional[str] = None,
+        device_map: Optional[str] = "auto",
+        max_memory: Optional[str] = None,
+        offload_folder: Optional[str] = None,
+        offload_index: Optional[int] = None,
+        peft_config: Dict[str, Any] = None,
+        adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None,
+        adapter_kwargs: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """
+        Load adapter weights from a file or a remote Hub folder. If you are not familiar with adapters and PEFT
+        methods, we invite you to read more about them on the PEFT official documentation:
+        https://huggingface.co/docs/peft
+
+        Requires peft as a backend to load the adapter weights.
+
+        Args:
+            peft_model_id (`str`, *optional*):
+                The identifier of the model to look for on the Hub, or a local path to the saved adapter config file
+                and adapter weights.
+            adapter_name (`str`, *optional*):
+                The adapter name to use. If not set, will use the default adapter.
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, or a commit id. Since we use a
+                git-based system for storing models and other artifacts on huggingface.co, `revision` can be any
+                identifier allowed by git.
+
+                To test a pull request you made on the Hub, you can pass `revision="refs/pr/"`.
+
+            token (`str`, *optional*):
+                The authentication token used to load the remote folder. Useful to load private repositories that are
+                on the Hugging Face Hub. You might need to call `huggingface-cli login` and paste your token to cache
+                it.
+            device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
+                A map that specifies where each submodule should go. It doesn't need to be refined to each
+                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
+                same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
+                like `1`) on which the model will be allocated, the device map will map the entire model to this
+                device. Passing `device_map = 0` means put the whole model on GPU 0.
+
+                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
+                more information about each option see [designing a device
+                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
+            max_memory (`Dict`, *optional*):
+                A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory
+                available for each GPU and the available CPU RAM if unset.
+            offload_folder (`str` or `os.PathLike`, *optional*):
+                If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+            offload_index (`int`, *optional*):
+                `offload_index` argument to be passed to the `accelerate.dispatch_model` method.
+            peft_config (`Dict[str, Any]`, *optional*):
+                The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt
+                methods. This argument is used in case users directly pass PEFT state dicts.
+            adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*):
+                The state dict of the adapter to load. This argument is used in case users directly pass PEFT state
+                dicts.
+            adapter_kwargs (`Dict[str, Any]`, *optional*):
+                Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and
+                the `find_adapter_config_file` method.
+        """
+        check_peft_version(min_version=MIN_PEFT_VERSION)
+
+        adapter_name = adapter_name if adapter_name is not None else "default"
+        if adapter_kwargs is None:
+            adapter_kwargs = {}
+
+        from peft import PeftConfig, inject_adapter_in_model, load_peft_weights
+        from peft.utils import set_peft_model_state_dict
+
+        if self._hf_peft_config_loaded and adapter_name in self.peft_config:
+            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
+
+        if peft_model_id is None and (adapter_state_dict is None and peft_config is None):
+            raise ValueError(
+                "You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter."
+            )
+
+        # We keep `revision` in the signature for backward compatibility
+        if revision is not None and "revision" not in adapter_kwargs:
+            adapter_kwargs["revision"] = revision
+        elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]:
+            logger.error(
+                "You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. "
+                "The one in `adapter_kwargs` will be used."
+            )
+
+        # Override token with adapter_kwargs' token
+        if "token" in adapter_kwargs:
+            token = adapter_kwargs.pop("token")
+
+        if peft_config is None:
+            adapter_config_file = find_adapter_config_file(
+                peft_model_id,
+                token=token,
+                **adapter_kwargs,
+            )
+
+            if adapter_config_file is None:
+                raise ValueError(
+                    f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the "
+                    "adapter model."
+                )
+
+            peft_config = PeftConfig.from_pretrained(
+                peft_model_id,
+                token=token,
+                **adapter_kwargs,
+            )
+
+        # Create and add fresh new adapters into the model.
+        inject_adapter_in_model(peft_config, self, adapter_name)
+
+        if not self._hf_peft_config_loaded:
+            self._hf_peft_config_loaded = True
+
+        if peft_model_id is not None:
+            adapter_state_dict = load_peft_weights(peft_model_id, token=token, **adapter_kwargs)
+
+        # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility
+        processed_adapter_state_dict = {}
+        prefix = "base_model.model."
+        for key, value in adapter_state_dict.items():
+            if key.startswith(prefix):
+                new_key = key[len(prefix) :]
+            else:
+                new_key = key
+            processed_adapter_state_dict[new_key] = value
+
+        # Load state dict
+        incompatible_keys = set_peft_model_state_dict(self, processed_adapter_state_dict, adapter_name)
+
+        if incompatible_keys is not None:
+            # check only for unexpected keys
+            if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0:
+                logger.warning(
+                    f"Loading adapter weights from {peft_model_id} led to unexpected keys not found in the model: "
+                    f" {incompatible_keys.unexpected_keys}. "
+                )
+
+        # Re-dispatch model and hooks in case the model is offloaded to CPU / Disk.
+        if (
+            (getattr(self, "hf_device_map", None) is not None)
+            and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
+            and len(self.peft_config) == 1
+        ):
+            self._dispatch_accelerate_model(
+                device_map=device_map,
+                max_memory=max_memory,
+                offload_folder=offload_folder,
+                offload_index=offload_index,
+            )
+
+    def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None:
+        r"""
+        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
+        official documentation: https://huggingface.co/docs/peft
+
+        Adds a fresh new adapter to the current model for training purposes. If no adapter name is passed, a default
+        name is assigned to the adapter to follow the convention of the PEFT library (in PEFT we use "default" as the
+        default adapter name).
+
+        Args:
+            adapter_config (`~peft.PeftConfig`):
+                The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt
+                methods.
+            adapter_name (`str`, *optional*, defaults to `"default"`):
+                The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
+        """
+        check_peft_version(min_version=MIN_PEFT_VERSION)
+
+        from peft import PeftConfig, inject_adapter_in_model
+
+        adapter_name = adapter_name or "default"
+
+        if not self._hf_peft_config_loaded:
+            self._hf_peft_config_loaded = True
+        elif adapter_name in self.peft_config:
+            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
+
+        if not isinstance(adapter_config, PeftConfig):
+            raise ValueError(
+                f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead."
+            )
+
+        # Retrieve the name or path of the model, one could also use self.config._name_or_path
+        # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100
+        adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None)
+        inject_adapter_in_model(adapter_config, self, adapter_name)
+
+        self.set_adapter(adapter_name)
+
+    def set_adapter(self, adapter_name: Union[List[str], str]) -> None:
+        """
+        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
+        official documentation: https://huggingface.co/docs/peft
+
+        Sets a specific adapter by forcing the model to use that adapter and disabling the other adapters.
+
+        Args:
+            adapter_name (`Union[List[str], str]`):
+                The name of the adapter to set. Can also be a list of strings to set multiple adapters.
+        """
+        check_peft_version(min_version=MIN_PEFT_VERSION)
+        if not self._hf_peft_config_loaded:
+            raise ValueError("No adapter loaded. Please load an adapter first.")
+        elif isinstance(adapter_name, list):
+            missing = set(adapter_name) - set(self.peft_config)
+            if len(missing) > 0:
+                raise ValueError(
+                    f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
+                    f" current loaded adapters are: {list(self.peft_config.keys())}"
+                )
+        elif adapter_name not in self.peft_config:
+            raise ValueError(
+                f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}"
+            )
+
+        from peft.tuners.tuners_utils import BaseTunerLayer
+        from peft.utils import ModulesToSaveWrapper
+
+        _adapters_has_been_set = False
+
+        for _, module in self.named_modules():
+            if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
+                # For backward compatibility with previous PEFT versions
+                if hasattr(module, "set_adapter"):
+                    module.set_adapter(adapter_name)
+                else:
+                    module.active_adapter = adapter_name
+                _adapters_has_been_set = True
+
+        if not _adapters_has_been_set:
+            raise ValueError(
+                "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters."
+            )
+
+    def disable_adapters(self) -> None:
+        r"""
+        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
+        official documentation: https://huggingface.co/docs/peft
+
+        Disable all adapters that are attached to the model. This leads to inferring with the base model only.
+        """
+        check_peft_version(min_version=MIN_PEFT_VERSION)
+
+        if not self._hf_peft_config_loaded:
+            raise ValueError("No adapter loaded. Please load an adapter first.")
+
+        from peft.tuners.tuners_utils import BaseTunerLayer
+        from peft.utils import ModulesToSaveWrapper
+
+        for _, module in self.named_modules():
+            if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
+                # Recent versions of PEFT need to call `enable_adapters` instead
+                if hasattr(module, "enable_adapters"):
+                    module.enable_adapters(enabled=False)
+                else:
+                    module.disable_adapters = True
+
+    def enable_adapters(self) -> None:
+        """
+        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
+        official documentation: https://huggingface.co/docs/peft
+
+        Enable adapters that are attached to the model. The model will use `self.active_adapter()`.
+        """
+        check_peft_version(min_version=MIN_PEFT_VERSION)
+
+        if not self._hf_peft_config_loaded:
+            raise ValueError("No adapter loaded. Please load an adapter first.")
+
+        from peft.tuners.tuners_utils import BaseTunerLayer
+
+        for _, module in self.named_modules():
+            if isinstance(module, BaseTunerLayer):
+                # Recent versions of PEFT need to call `enable_adapters` instead
+                if hasattr(module, "enable_adapters"):
+                    module.enable_adapters(enabled=True)
+                else:
+                    module.disable_adapters = False
+
+    def active_adapters(self) -> List[str]:
+        """
+        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
+        official documentation: https://huggingface.co/docs/peft
+
+        Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters
+        for inference) returns the list of all active adapters so that users can deal with them accordingly.
+
+        For previous PEFT versions (that do not support multi-adapter inference), `module.active_adapter` will return
+        a single string.
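+
+        Example (a minimal sketch; the checkpoint names below are placeholders, any PEFT-compatible base model and
+        adapter repository would work the same way):
+
+        ```python
+        >>> from transformers import AutoModelForCausalLM
+
+        >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
+        >>> model.load_adapter("ybelkada/opt-350m-lora", adapter_name="lora_1")
+        >>> model.set_adapter("lora_1")
+        >>> model.active_adapters()
+        ['lora_1']
+        ```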
+ """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not is_peft_available(): + raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + active_adapters = module.active_adapter + break + + # For previous PEFT versions + if isinstance(active_adapters, str): + active_adapters = [active_adapters] + + return active_adapters + + def active_adapter(self) -> str: + warnings.warn( + "The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning + ) + + return self.active_adapters()[0] + + def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict: + """ + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter. + If no adapter_name is passed, the active adapter is used. + + Args: + adapter_name (`str`, *optional*): + The name of the adapter to get the state dict from. If no name is passed, the active adapter is used. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft import get_peft_model_state_dict + + if adapter_name is None: + adapter_name = self.active_adapter() + + adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name) + return adapter_state_dict + + def _dispatch_accelerate_model( + self, + device_map: str, + max_memory: Optional[int] = None, + offload_folder: Optional[str] = None, + offload_index: Optional[int] = None, + ) -> None: + """ + Optional re-dispatch the model and attach new hooks to the model in case the model has been loaded with + accelerate (i.e. with `device_map=xxx`) + + Args: + device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank + like `1`) on which the model will be allocated, the device map will map the entire model to this + device. Passing `device_map = 0` means put the whole model on GPU 0. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_index (`int`, *optional*): + The offload_index argument to be passed to `accelerate.dispatch_model` method. 
+ """ + dispatch_model_kwargs = {} + # Safety checker for previous `accelerate` versions + # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ + if "offload_index" in inspect.signature(dispatch_model).parameters: + dispatch_model_kwargs["offload_index"] = offload_index + + no_split_module_classes = self._no_split_modules + + if device_map != "sequential": + max_memory = get_balanced_memory( + self, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + low_zero=(device_map == "balanced_low_0"), + ) + if isinstance(device_map, str): + device_map = infer_auto_device_map( + self, max_memory=max_memory, no_split_module_classes=no_split_module_classes + ) + dispatch_model( + self, + device_map=device_map, + offload_dir=offload_folder, + **dispatch_model_kwargs, + ) diff --git a/modified/modeling_attn_mask_utils.py b/modified/modeling_attn_mask_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..734f443e1fc9d44a1fdeba8b5a77112665516cd1 --- /dev/null +++ b/modified/modeling_attn_mask_utils.py @@ -0,0 +1,470 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch + + +@dataclass +class AttentionMaskConverter: + """ + A utility attention mask class that allows one to: + - Create a causal 4d mask + - Create a causal 4d mask with slided window + - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, + key_value_length) that can be multiplied with attention scores + + Examples: + + ```python + >>> import torch + >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter + + >>> converter = AttentionMaskConverter(True) + >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32) + tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]]) + ``` + + Parameters: + is_causal (`bool`): + Whether the attention mask should be a uni-directional (causal) or bi-directional mask. + + sliding_window (`int`, *optional*): + Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. 
+ """ + + is_causal: bool + sliding_window: int + + def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): + self.is_causal = is_causal + self.sliding_window = sliding_window + + if self.sliding_window is not None and self.sliding_window <= 0: + raise ValueError( + f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`" + ) + + def to_causal_4d( + self, + batch_size: int, + query_length: int, + key_value_length: int, + dtype: torch.dtype, + device: Union[torch.device, "str"] = "cpu", + ) -> Optional[torch.Tensor]: + """ + Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative + bias to upper right hand triangular matrix (causal mask). + """ + if not self.is_causal: + raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") + + # If shape is not cached, create a new causal mask and cache it + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 or self.sliding_window is not None: + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, + ) + + return causal_4d_mask + + def to_4d( + self, + attention_mask_2d: torch.Tensor, + query_length: int, + dtype: torch.dtype, + key_value_length: Optional[int] = None, + ) -> torch.Tensor: + """ + Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, + key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is + causal, a causal mask will be added. + """ + input_shape = (attention_mask_2d.shape[0], query_length) + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: + if key_value_length is None: + raise ValueError( + "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask." + ) + + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, + ) + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") + + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device + ) + if causal_4d_mask is not None: + expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min) + + # expanded_attn_mask + causal_4d_mask can cause some overflow + expanded_4d_mask = expanded_attn_mask + + return expanded_4d_mask + + @staticmethod + def _make_causal_mask( + input_ids_shape: torch.Size, + dtype: torch.dtype, + device: torch.device, + past_key_values_length: int = 0, + sliding_window: Optional[int] = None, + ): + """ + Make causal mask used for bi-directional self-attention. 
+ """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + + # add lower triangular sliding window mask if necessary + if sliding_window is not None: + diagonal = past_key_values_length - sliding_window + 1 + + context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal) + mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) + + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + @staticmethod + def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + @staticmethod + def _unmask_unattended( + expanded_mask: torch.Tensor, attention_mask: torch.Tensor, unmasked_value: Union[bool, float] + ): + # fmt: off + """ + Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when + using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. + Details: https://github.com/pytorch/pytorch/issues/110213 + + `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len]. + `attention_mask` is [bsz, src_seq_len]. + + The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias. + + For example, if `attention_mask` is + ``` + [[0, 0, 1], + [1, 1, 1], + [0, 1, 1]] + ``` + and `expanded_mask` is (e.g. here left-padding case) + ``` + [[[[0, 0, 0], + [0, 0, 0], + [0, 0, 1]]], + [[[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]], + [[[0, 0, 0], + [0, 1, 0], + [0, 1, 1]]]] + ``` + then the modified `expanded_mask` will be + ``` + [[[[1, 1, 1], <-- modified + [1, 1, 1], <-- modified + [0, 0, 1]]], + [[[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]], + [[[1, 1, 1], <-- modified + [0, 1, 0], + [0, 1, 1]]]] + ``` + """ + # fmt: on + + # Get the index of the first non-zero value for every sample in the batch. + # In the above example, indices = [[2], [0], [1]]] + tmp = torch.arange(attention_mask.shape[1], 0, -1) + indices = torch.argmax(attention_mask.cpu() * tmp, 1, keepdim=True) + + # Find the batch indexes that have unattended tokens on the leftmost side (e.g. [0, 0, 1, 1, 1]), for which the first rows of the + # expanded mask will be completely unattended. + left_masked_rows = torch.where(indices > 0)[0] + + if left_masked_rows.shape[0] == 0: + return expanded_mask + indices = indices[left_masked_rows] + + max_len = torch.max(indices) + range_tensor = torch.arange(max_len).unsqueeze(0) + range_tensor = range_tensor.repeat(indices.size(0), 1) + + # Avoid unmasking tokens at relevant target positions (on the row axis), by rather unmasking possibly several times the first row that should always be unmasked as we filtered out the batch above. 
+ range_tensor[range_tensor >= indices] = 0 + + # TODO: we may drop support for 3D attention mask as the refactor from Patrick maybe dropped this case + if expanded_mask.dim() == 4: + num_masks = expanded_mask.shape[1] + if num_masks == 1: + # Broadcast [left_masked_rows, 1], [left_masked_rows, max_len] + mask_slice = (left_masked_rows[:, None], 0, range_tensor) + else: + # Broadcast [left_masked_rows, 1, 1], [1, num_masks, 1], [left_masked_rows, 1, max_len] + mask_slice = ( + left_masked_rows[:, None, None], + torch.arange(num_masks)[None, :, None], + range_tensor[:, None, :], + ) + else: + # Broadcast [left_masked_rows, 1], [left_masked_rows, max_len] + mask_slice = (left_masked_rows[:, None], range_tensor) + + expanded_mask[mask_slice] = unmasked_value + + return expanded_mask + + +def _prepare_4d_causal_attention_mask( + attention_mask: Optional[torch.Tensor], + input_shape: Union[torch.Size, Tuple, List], + inputs_embeds: torch.Tensor, + past_key_values_length: int, + sliding_window: Optional[int] = None, +): + """ + Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)` + + Args: + attention_mask (`torch.Tensor` or `None`): + A 2D attention mask of shape `(batch_size, key_value_length)` + input_shape (`tuple(int)` or `list(int)` or `torch.Size`): + The input shape should be a tuple that defines `(batch_size, query_length)`. + inputs_embeds (`torch.Tensor`): + The embedded inputs as a torch Tensor. + past_key_values_length (`int`): + The length of the key value cache. + sliding_window (`int`, *optional*): + If the model uses windowed attention, a sliding window should be passed. + """ + attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) + + key_value_length = input_shape[-1] + past_key_values_length + + # 4d mask is passed through the layers + if attention_mask is not None: + attention_mask = attn_mask_converter.to_4d( + attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype + ) + else: + attention_mask = attn_mask_converter.to_causal_4d( + input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device + ) + + return attention_mask + + +# Adapted from _prepare_4d_causal_attention_mask +def _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask: Optional[torch.Tensor], + input_shape: Union[torch.Size, Tuple, List], + inputs_embeds: torch.Tensor, + past_key_values_length: int, + sliding_window: Optional[int] = None, +): + """ + Prepares the correct `attn_mask` argument to be used by `torch.nn.functional.scaled_dot_product_attention`. + + In case no token is masked in the `attention_mask` argument, we simply set it to `None` for the cases `query_length == 1` and + `key_value_length == query_length`, and rely instead on SDPA `is_causal` argument to use causal/non-causal masks, + allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed). + """ + attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) + + key_value_length = input_shape[-1] + past_key_values_length + batch_size, query_length = input_shape + + # torch.jit.trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1` + # used as an SDPA argument. 
We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing. + # TODO: Fix this as well when using torchdynamo with fullgraph=True. + is_tracing = torch.jit.is_tracing() + + if attention_mask is not None: + if torch.all(attention_mask == 1): + if is_tracing: + pass + elif query_length == 1: + # For query_length == 1, causal attention and bi-directional attention are the same. + attention_mask = None + elif key_value_length == query_length: + attention_mask = None + else: + # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore the attention mask, as SDPA causal mask generation + # may be wrong. We will set `is_causal=False` in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here. + # Reference: https://github.com/pytorch/pytorch/issues/108108 + pass + elif query_length > 1 and key_value_length != query_length: + # See the comment above (https://github.com/pytorch/pytorch/issues/108108). + # Ugly: we set it to True here to dispatch in the following controlflow to `to_causal_4d`. + attention_mask = True + elif is_tracing: + raise ValueError( + 'Attention using SDPA can not be traced with torch.jit.trace when no attention_mask is provided. To solve this issue, please either load your model with the argument `attn_implementation="eager"` or pass an attention_mask input when tracing the model.' + ) + + if attention_mask is None: + expanded_4d_mask = None + elif attention_mask is True: + expanded_4d_mask = attn_mask_converter.to_causal_4d( + input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device + ) + else: + expanded_4d_mask = attn_mask_converter.to_4d( + attention_mask, + input_shape[-1], + dtype=inputs_embeds.dtype, + key_value_length=key_value_length, + ) + + # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend + # produces nans if sequences are completely unattended in the attention mask. Details: https://github.com/pytorch/pytorch/issues/110213 + if query_length > 1: + expanded_4d_mask = AttentionMaskConverter._unmask_unattended( + expanded_4d_mask, attention_mask, unmasked_value=0.0 + ) + + return expanded_4d_mask + + +def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)` + + Args: + mask (`torch.Tensor` or `None`): + A 2D attention mask of shape `(batch_size, key_value_length)` + dtype (`torch.dtype`): + The torch dtype the created mask shall have. + tgt_len (`int`): + The target length or query length the created mask shall have. + """ + return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) + + +def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)` + + Args: + mask (`torch.Tensor` or `None`): + A 2D attention mask of shape `(batch_size, key_value_length)` + dtype (`torch.dtype`): + The torch dtype the created mask shall have. + tgt_len (`int`): + The target length or query length the created mask shall have. 
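+
+    A sketch of the short-circuit behavior (when no token is masked the function returns `None`, letting SDPA use its
+    fused causal path):
+
+    ```python
+    >>> import torch
+
+    >>> _prepare_4d_attention_mask_for_sdpa(torch.ones(1, 4, dtype=torch.long), torch.float32)  # returns None
+    >>> _prepare_4d_attention_mask_for_sdpa(torch.tensor([[0, 1, 1, 1]]), torch.float32).shape
+    torch.Size([1, 1, 4, 4])
+    ```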
+ """ + batch_size, key_value_length = mask.shape + tgt_len = tgt_len if tgt_len is not None else key_value_length + + # torch.jit.trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1` + # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing. + # TODO: Fix this as well when using torchdynamo with fullgraph=True. + is_tracing = torch.jit.is_tracing() + + if torch.all(mask == 1): + if is_tracing: + pass + elif tgt_len == 1: + # For query_length == 1, causal attention and bi-directional attention are the same. + return None + elif key_value_length == tgt_len: + return None + else: + # Unfortunately, for query_length > 1 and key_value_length != query_length, we can not generally ignore the attention mask, as SDPA causal mask generation + # may be wrong. We will set is_causal=False in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here. + # Reference: https://github.com/pytorch/pytorch/issues/108108 + return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) + else: + return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) + + +def _create_4d_causal_attention_mask( + input_shape: Union[torch.Size, Tuple, List], + dtype: torch.dtype, + device: torch.device, + past_key_values_length: int = 0, + sliding_window: Optional[int] = None, +) -> Optional[torch.Tensor]: + """ + Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` + + Args: + input_shape (`tuple(int)` or `list(int)` or `torch.Size`): + The input shape should be a tuple that defines `(batch_size, query_length)`. + dtype (`torch.dtype`): + The torch dtype the created mask shall have. + device (`int`): + The torch device the created mask shall have. + sliding_window (`int`, *optional*): + If the model uses windowed attention, a sliding window should be passed. + """ + attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) + + key_value_length = past_key_values_length + input_shape[-1] + attention_mask = attn_mask_converter.to_causal_4d( + input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device + ) + + return attention_mask diff --git a/modified/modeling_flax_outputs.py b/modified/modeling_flax_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..179a0b787936960c118bbb5ad34f73d00469d481 --- /dev/null +++ b/modified/modeling_flax_outputs.py @@ -0,0 +1,700 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Tuple + +import flax +import jax.numpy as jnp + +from .utils import ModelOutput + + +@flax.struct.dataclass +class FlaxBaseModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. 
+ + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithNoAttention(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the + model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): + Last layer hidden-state after a pooling operation on the spatial dimensions. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the + model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: jnp.ndarray = None + pooler_output: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxImageClassifierOutputWithNoAttention(ModelOutput): + """ + Base class for outputs of image classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). 
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when + `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also + called feature maps) of the model at the output of each stage. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + past_key_values (`Dict[str, jnp.ndarray]`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: jnp.ndarray = None + past_key_values: Optional[Dict[str, jnp.ndarray]] = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPooling(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) further processed by a + Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence + prediction (classification) objective during pretraining. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: jnp.ndarray = None + pooler_output: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. 
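+
+    A sketch of attribute access (`outputs` stands for any instance of this class returned by a Flax model):
+
+    ```python
+    >>> last_hidden = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)
+    >>> pooled = outputs.pooler_output  # (batch_size, hidden_size)
+    ```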
+ """ + + last_hidden_state: jnp.ndarray = None + pooler_output: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSeq2SeqModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. 
+ + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
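+
+    Example:
+
+    A minimal sketch (`t5-small` is only an illustrative choice of encoder-decoder checkpoint):
+
+    ```python
+    >>> from transformers import AutoTokenizer, FlaxT5Model
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
+    >>> model = FlaxT5Model.from_pretrained("t5-small")
+
+    >>> inputs = tokenizer("Studies have shown that owning a dog is good for you", return_tensors="np")
+    >>> decoder_inputs = tokenizer("Studies show that", return_tensors="np")
+
+    >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
+    >>> outputs.last_hidden_state.shape  # decoder output: (batch_size, target_sequence_length, hidden_size)
+    >>> outputs.encoder_last_hidden_state.shape  # encoder output: (batch_size, input_sequence_length, hidden_size)
+    ```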
+ """ + + last_hidden_state: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxCausalLMOutputWithCrossAttentions(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Cross attentions weights after the attention softmax, used to compute the weighted average in the + cross-attention heads. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key, value + states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. + Only relevant if `config.is_decoder = True`. + + Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + """ + + logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxMaskedLMOutput(ModelOutput): + """ + Base class for masked language models outputs. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +FlaxCausalLMOutput = FlaxMaskedLMOutput + + +@flax.struct.dataclass +class FlaxSeq2SeqLMOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
+ encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxNextSentencePredictorOutput(ModelOutput): + """ + Base class for outputs of models predicting if two sentences are consecutive or not. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence sentence classification models. 
+ + Args: + logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
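+
+    Example:
+
+    A minimal sketch (`FlaxBartForSequenceClassification` is used only as an illustrative model that returns
+    this output type; the checkpoint and label count are placeholders):
+
+    ```python
+    >>> from transformers import AutoTokenizer, FlaxBartForSequenceClassification
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
+    >>> model = FlaxBartForSequenceClassification.from_pretrained("facebook/bart-base", num_labels=2)
+
+    >>> inputs = tokenizer("This movie was great!", return_tensors="np")
+    >>> outputs = model(**inputs)
+    >>> outputs.logits.shape  # (batch_size, config.num_labels)
+    ```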
+ """ + + logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxMultipleChoiceModelOutput(ModelOutput): + """ + Base class for outputs of multiple choice models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, num_choices)`): + *num_choices* is the second dimension of the input tensors. (see *input_ids* above). + + Classification scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxTokenClassifierOutput(ModelOutput): + """ + Base class for outputs of token classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`): + Classification scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of question answering models. + + Args: + start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). 
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + start_logits: jnp.ndarray = None + end_logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence question answering models. + + Args: + start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + start_logits: jnp.ndarray = None + end_logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None diff --git a/modified/modeling_flax_pytorch_utils.py b/modified/modeling_flax_pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f78c4e78c78ba8a4eb58881232f37a7de4786137 --- /dev/null +++ b/modified/modeling_flax_pytorch_utils.py @@ -0,0 +1,484 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch - Flax general utilities.""" + + +import os +from pickle import UnpicklingError +from typing import Dict, Tuple + +import jax +import jax.numpy as jnp +import numpy as np +from flax.serialization import from_bytes +from flax.traverse_util import flatten_dict, unflatten_dict + +import transformers + +from . import is_safetensors_available +from .utils import logging + + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.flax import load_file as safe_load_file + + +logger = logging.get_logger(__name__) + + +##################### +# PyTorch => Flax # +##################### + + +def load_pytorch_checkpoint_in_flax_state_dict( + flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False +): + """Load pytorch checkpoints in a flax model""" + try: + import torch # noqa: F401 + except (ImportError, ModuleNotFoundError): + logger.error( + "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" + " instructions." 
+ ) + raise + + if not is_sharded: + pt_path = os.path.abspath(pytorch_checkpoint_path) + logger.info(f"Loading PyTorch weights from {pt_path}") + + if pt_path.endswith(".safetensors"): + pt_state_dict = {} + with safe_open(pt_path, framework="pt") as f: + for k in f.keys(): + pt_state_dict[k] = f.get_tensor(k) + else: + pt_state_dict = torch.load(pt_path, map_location="cpu") + logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.") + + flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) + else: + # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files + flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model) + return flax_state_dict + + +def rename_key_and_reshape_tensor( + pt_tuple_key: Tuple[str], + pt_tensor: np.ndarray, + random_flax_state_dict: Dict[str, jnp.ndarray], + model_prefix: str, +) -> (Tuple[str], np.ndarray): + """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" + + def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool: + """Checks if `key` of `(prefix,) + key` is in random_flax_state_dict""" + return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0 + + # layer norm + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) + if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # batch norm layer mean + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",) + if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # batch norm layer var + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",) + if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # embedding + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) + if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # conv layer + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) + if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key): + pt_tensor = pt_tensor.transpose(2, 3, 1, 0) + return renamed_pt_tuple_key, pt_tensor + + # linear layer + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) + if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key): + pt_tensor = pt_tensor.T + return renamed_pt_tuple_key, pt_tensor + + # old PyTorch layer norm weight + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) + if pt_tuple_key[-1] == "gamma": + return renamed_pt_tuple_key, pt_tensor + + # old PyTorch layer norm bias + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) + if pt_tuple_key[-1] == "beta": + return renamed_pt_tuple_key, pt_tensor + + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + name = None + if pt_tuple_key[-3::2] == ("parametrizations", "original0"): + name = pt_tuple_key[-2] + "_g" + elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): + name = pt_tuple_key[-2] + "_v" + if name is not None: + renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,) + return renamed_pt_tuple_key, pt_tensor + + return pt_tuple_key, pt_tensor + + +def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model): + # convert pytorch tensor to numpy + # numpy 
currently does not support bfloat16, need to go over float32 in this case to not lose precision + try: + import torch # noqa: F401 + except (ImportError, ModuleNotFoundError): + logger.error( + "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" + " instructions." + ) + raise + + weight_dtypes = {k: v.dtype for k, v in pt_state_dict.items()} + pt_state_dict = { + k: v.numpy() if not v.dtype == torch.bfloat16 else v.float().numpy() for k, v in pt_state_dict.items() + } + + model_prefix = flax_model.base_model_prefix + + # use params dict if the model contains batch norm layers + if "params" in flax_model.params: + flax_model_params = flax_model.params["params"] + else: + flax_model_params = flax_model.params + random_flax_state_dict = flatten_dict(flax_model_params) + + # add batch_stats keys,values to dict + if "batch_stats" in flax_model.params: + flax_batch_stats = flatten_dict(flax_model.params["batch_stats"]) + random_flax_state_dict.update(flax_batch_stats) + + flax_state_dict = {} + + load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and ( + model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + load_base_model_into_model_with_head = (model_prefix in flax_model_params) and ( + model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + + # Need to change some parameters name to match Flax names + for pt_key, pt_tensor in pt_state_dict.items(): + pt_tuple_key = tuple(pt_key.split(".")) + is_bfloat_16 = weight_dtypes[pt_key] == torch.bfloat16 + + # remove base model prefix if necessary + has_base_model_prefix = pt_tuple_key[0] == model_prefix + if load_model_with_head_into_base_model and has_base_model_prefix: + pt_tuple_key = pt_tuple_key[1:] + + # Correctly rename weight parameters + flax_key, flax_tensor = rename_key_and_reshape_tensor( + pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix + ) + + # add model prefix if necessary + require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict + if load_base_model_into_model_with_head and require_base_model_prefix: + flax_key = (model_prefix,) + flax_key + + if flax_key in random_flax_state_dict: + if flax_tensor.shape != random_flax_state_dict[flax_key].shape: + raise ValueError( + f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " + f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
+ ) + + # add batch stats if the model contains batchnorm layers + if "batch_stats" in flax_model.params: + if "mean" in flax_key[-1] or "var" in flax_key[-1]: + flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor) + continue + # remove num_batches_tracked key + if "num_batches_tracked" in flax_key[-1]: + flax_state_dict.pop(flax_key, None) + continue + + # also add unexpected weight so that warning is thrown + flax_state_dict[("params",) + flax_key] = ( + jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) + ) + + else: + # also add unexpected weight so that warning is thrown + flax_state_dict[flax_key] = ( + jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) + ) + + return unflatten_dict(flax_state_dict) + + +############################ +# Sharded Pytorch => Flax # +############################ + + +def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model): + import torch + + # Load the index + flax_state_dict = {} + for shard_file in shard_filenames: + # load using msgpack utils + pt_state_dict = torch.load(shard_file) + pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} + + model_prefix = flax_model.base_model_prefix + + # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict + if "batch_stats" in flax_model.params: + flax_model_params = flax_model.params["params"] + + random_flax_state_dict = flatten_dict(flax_model_params) + random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"])) + else: + flax_model_params = flax_model.params + random_flax_state_dict = flatten_dict(flax_model_params) + + load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and ( + model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + load_base_model_into_model_with_head = (model_prefix in flax_model_params) and ( + model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + # Need to change some parameters name to match Flax names + for pt_key, pt_tensor in pt_state_dict.items(): + pt_tuple_key = tuple(pt_key.split(".")) + + # remove base model prefix if necessary + has_base_model_prefix = pt_tuple_key[0] == model_prefix + if load_model_with_head_into_base_model and has_base_model_prefix: + pt_tuple_key = pt_tuple_key[1:] + + # Correctly rename weight parameters + flax_key, flax_tensor = rename_key_and_reshape_tensor( + pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix + ) + # add model prefix if necessary + require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict + if load_base_model_into_model_with_head and require_base_model_prefix: + flax_key = (model_prefix,) + flax_key + + if flax_key in random_flax_state_dict: + if flax_tensor.shape != random_flax_state_dict[flax_key].shape: + raise ValueError( + f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " + f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
+                    )
+
+            # add batch stats if the model contains batchnorm layers
+            if "batch_stats" in flax_model.params:
+                if "mean" in flax_key[-1]:
+                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
+                    continue
+                if "var" in flax_key[-1]:
+                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
+                    continue
+                # remove num_batches_tracked key
+                if "num_batches_tracked" in flax_key[-1]:
+                    flax_state_dict.pop(flax_key, None)
+                    continue
+
+                # also add unexpected weight so that warning is thrown
+                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
+
+            else:
+                # also add unexpected weight so that warning is thrown
+                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
+    return unflatten_dict(flax_state_dict)
+
+
+#####################
+# Flax => PyTorch #
+#####################
+
+
+def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
+    """Load flax checkpoints in a PyTorch model"""
+    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
+    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
+
+    # import correct flax class
+    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
+
+    # load flax weight dict
+    if flax_checkpoint_path.endswith(".safetensors"):
+        flax_state_dict = safe_load_file(flax_checkpoint_path)
+        flax_state_dict = unflatten_dict(flax_state_dict, sep=".")
+    else:
+        with open(flax_checkpoint_path, "rb") as state_f:
+            try:
+                flax_state_dict = from_bytes(flax_cls, state_f.read())
+            except UnpicklingError:
+                raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
+
+    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
+
+
+def load_flax_weights_in_pytorch_model(pt_model, flax_state):
+    """Load flax checkpoints in a PyTorch model"""
+
+    try:
+        import torch  # noqa: F401
+    except (ImportError, ModuleNotFoundError):
+        logger.error(
+            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
+            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
+            " instructions."
+        )
+        raise
+
+    # check if we have bf16 weights
+    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
+    if any(is_type_bf16):
+        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
+        # and bf16 is not fully supported in PT yet.
+        logger.warning(
+            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
+            "before loading those in PyTorch model."
+ ) + flax_state = jax.tree_util.tree_map( + lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state + ) + + flax_state_dict = flatten_dict(flax_state) + pt_model_dict = pt_model.state_dict() + + load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and ( + pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()} + ) + load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and ( + pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()} + ) + + # keep track of unexpected & missing keys + unexpected_keys = [] + missing_keys = set(pt_model_dict.keys()) + + for flax_key_tuple, flax_tensor in flax_state_dict.items(): + has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix + require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict + + # adapt flax_key to prepare for loading from/to base model only + if load_model_with_head_into_base_model and has_base_model_prefix: + flax_key_tuple = flax_key_tuple[1:] + elif load_base_model_into_model_with_head and require_base_model_prefix: + flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple + + # rename flax weights to PyTorch format + if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict: + # conv layer + flax_key_tuple = flax_key_tuple[:-1] + ("weight",) + flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1)) + elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict: + # linear layer + flax_key_tuple = flax_key_tuple[:-1] + ("weight",) + flax_tensor = flax_tensor.T + elif flax_key_tuple[-1] in ["scale", "embedding"]: + flax_key_tuple = flax_key_tuple[:-1] + ("weight",) + + # adding batch stats from flax batch norm to pt + elif "mean" in flax_key_tuple[-1]: + flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",) + elif "var" in flax_key_tuple[-1]: + flax_key_tuple = flax_key_tuple[:-1] + ("running_var",) + + if "batch_stats" in flax_state: + flax_key = ".".join(flax_key_tuple[1:]) # Remove the params/batch_stats header + else: + flax_key = ".".join(flax_key_tuple) + + # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. + special_pt_names = {} + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + for key in pt_model_dict: + key_components = key.split(".") + name = None + if key_components[-3::2] == ["parametrizations", "original0"]: + name = key_components[-2] + "_g" + elif key_components[-3::2] == ["parametrizations", "original1"]: + name = key_components[-2] + "_v" + if name is not None: + key_components = key_components[:-3] + [name] + key_to_check = ".".join(key_components) + special_pt_names[key_to_check] = key + + if flax_key in special_pt_names: + flax_key = special_pt_names[flax_key] + + if flax_key in pt_model_dict: + if flax_tensor.shape != pt_model_dict[flax_key].shape: + raise ValueError( + f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " + f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." 
+ ) + else: + # add weight to pytorch dict + flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor + pt_model_dict[flax_key] = torch.from_numpy(flax_tensor) + # remove from missing keys + missing_keys.remove(flax_key) + else: + # weight is not expected by PyTorch model + unexpected_keys.append(flax_key) + + pt_model.load_state_dict(pt_model_dict) + + # re-transform missing_keys to list + missing_keys = list(missing_keys) + + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the Flax model were not used when initializing the PyTorch model" + f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" + " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" + f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" + " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" + " FlaxBertForSequenceClassification model)." + ) + else: + logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" + f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" + " use it for predictions and inference." + ) + else: + logger.warning( + f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n" + "If your task is similar to the task the model of the checkpoint was trained on, " + f"you can already use {pt_model.__class__.__name__} for predictions without further training." + ) + + return pt_model diff --git a/modified/modeling_flax_utils.py b/modified/modeling_flax_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..eb14216c5cd994f57e787dc22f67906d0ba07b6b --- /dev/null +++ b/modified/modeling_flax_utils.py @@ -0,0 +1,1289 @@ +# coding=utf-8 +# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import gc +import json +import os +import re +import warnings +from functools import partial +from pickle import UnpicklingError +from typing import Any, Dict, Optional, Set, Tuple, Union + +import flax.linen as nn +import jax +import jax.numpy as jnp +import msgpack.exceptions +from flax.core.frozen_dict import FrozenDict, unfreeze +from flax.serialization import from_bytes, to_bytes +from flax.traverse_util import flatten_dict, unflatten_dict +from jax.random import PRNGKey + +from .configuration_utils import PretrainedConfig +from .dynamic_module_utils import custom_object_save +from .generation import FlaxGenerationMixin, GenerationConfig +from .modeling_flax_pytorch_utils import load_pytorch_checkpoint_in_flax_state_dict +from .utils import ( + FLAX_WEIGHTS_INDEX_NAME, + FLAX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + PushToHubMixin, + add_code_sample_docstrings, + add_start_docstrings_to_model_forward, + cached_file, + copy_func, + download_url, + has_file, + is_offline_mode, + is_remote_url, + logging, + replace_return_docstrings, +) +from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files +from .utils.import_utils import is_safetensors_available + + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.flax import load_file as safe_load_file + from safetensors.flax import save_file as safe_save_file + +logger = logging.get_logger(__name__) + + +def quick_gelu(x): + return x * jax.nn.sigmoid(1.702 * x) + + +ACT2FN = { + "gelu": partial(nn.gelu, approximate=False), + "relu": nn.relu, + "silu": nn.swish, + "swish": nn.swish, + "gelu_new": partial(nn.gelu, approximate=True), + "quick_gelu": quick_gelu, +} + + +def dtype_byte_size(dtype): + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: + ```py + >>> dtype_byte_size(np.float32) + 4 + ``` + """ + if dtype == bool: + return 1 / 8 + bit_search = re.search(r"[^\d](\d+)$", dtype.name) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 + + +def flax_shard_checkpoint(params, max_shard_size="10GB"): + """ + Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a + given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so + there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For + example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as + [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will + have a size greater than `max_shard_size`. + + + + Args: + params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit + (like `"5MB"`). 
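+
+    Example:
+
+    A minimal sketch of the sharding behaviour (the parameter names and sizes are invented for illustration;
+    two ~4MB fp32 weights with a 5MB limit end up in one shard each):
+
+    ```python
+    >>> import numpy as np
+
+    >>> params = {
+    ...     "layer_0": {"kernel": np.zeros((1024, 1024), dtype=np.float32)},
+    ...     "layer_1": {"kernel": np.zeros((1024, 1024), dtype=np.float32)},
+    ... }
+    >>> shards, index = flax_shard_checkpoint(params, max_shard_size="5MB")
+    >>> sorted(shards.keys())
+    ['flax_model-00001-of-00002.msgpack', 'flax_model-00002-of-00002.msgpack']
+    >>> index["weight_map"]["layer_0/kernel"]
+    'flax_model-00001-of-00002.msgpack'
+    ```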
+    """
+    max_shard_size = convert_file_size_to_int(max_shard_size)
+
+    sharded_state_dicts = []
+    current_block = {}
+    current_block_size = 0
+    total_size = 0
+
+    # flatten the weights to chunk
+    weights = flatten_dict(params, sep="/")
+    for item in weights:
+        weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)
+
+        # If this weight is going to tip over the maximum size, we split.
+        if current_block_size + weight_size > max_shard_size:
+            sharded_state_dicts.append(current_block)
+            current_block = {}
+            current_block_size = 0
+
+        current_block[item] = weights[item]
+        current_block_size += weight_size
+        total_size += weight_size
+
+    # Add the last block
+    sharded_state_dicts.append(current_block)
+
+    # If we only have one shard, we return it
+    if len(sharded_state_dicts) == 1:
+        return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None
+
+    # Otherwise, let's build the index
+    weight_map = {}
+    shards = {}
+    for idx, shard in enumerate(sharded_state_dicts):
+        shard_file = FLAX_WEIGHTS_NAME.replace(".msgpack", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack")
+        shards[shard_file] = shard
+        for weight_name in shard.keys():
+            weight_map[weight_name] = shard_file
+
+    # Add the metadata
+    metadata = {"total_size": total_size}
+    index = {"metadata": metadata, "weight_map": weight_map}
+    return shards, index
+
+
+class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin):
+    r"""
+    Base class for all models.
+
+    [`FlaxPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
+    downloading and saving models.
+
+    Class attributes (overridden by derived classes):
+
+        - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
+          for this model architecture.
+        - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
+          classes of the same architecture adding modules on top of the base model.
+        - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
+          models, `pixel_values` for vision models and `input_values` for speech models).
+    """
+
+    config_class = None
+    base_model_prefix = ""
+    main_input_name = "input_ids"
+    _auto_class = None
+    _missing_keys = set()
+
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        module: nn.Module,
+        input_shape: Tuple = (1, 1),
+        seed: int = 0,
+        dtype: jnp.dtype = jnp.float32,
+        _do_init: bool = True,
+    ):
+        if config is None:
+            raise ValueError("config cannot be None")
+
+        if module is None:
+            raise ValueError("module cannot be None")
+
+        # Those are private to be exposed as typed property on derived classes.
+        self._config = config
+        self._module = module
+
+        # Those are public as their type is generic to all derived classes.
+        self.key = PRNGKey(seed)
+        self.dtype = dtype
+        self.input_shape = input_shape
+        self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
+
+        # To check if the model was initialized automatically.
+        self._is_initialized = _do_init
+
+        if _do_init:
+            # randomly initialized parameters
+            random_params = self.init_weights(self.key, input_shape)
+            params_shape_tree = jax.eval_shape(lambda params: params, random_params)
+        else:
+            init_fn = partial(self.init_weights, input_shape=input_shape)
+            params_shape_tree = jax.eval_shape(init_fn, self.key)
+
+            logger.info(
+                "Model weights are not initialized as `_do_init` is set to `False`. "
+                f"Make sure to call `{self.__class__.__name__}.init_weights` manually to initialize the weights."
+            )
+
+        # get the shape of the parameters
+        self._params_shape_tree = params_shape_tree
+
+        # save required_params as set
+        self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())
+
+        # initialize the parameters
+        if _do_init:
+            self.params = random_params
+
+    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> Dict:
+        raise NotImplementedError(f"init method has to be implemented for {self}")
+
+    def enable_gradient_checkpointing(self):
+        raise NotImplementedError(f"gradient checkpointing method has to be implemented for {self}")
+
+    @classmethod
+    def _from_config(cls, config, **kwargs):
+        """
+        All context managers that the model should be initialized under go here.
+        """
+        return cls(config, **kwargs)
+
+    @property
+    def framework(self) -> str:
+        """
+        :str: Identifies that this is a Flax model.
+        """
+        return "flax"
+
+    @property
+    def config(self) -> PretrainedConfig:
+        return self._config
+
+    @property
+    def module(self) -> nn.Module:
+        return self._module
+
+    @property
+    def params(self) -> Union[Dict, FrozenDict]:
+        if not self._is_initialized:
+            raise ValueError(
+                "`params` cannot be accessed from model when the model is created with `_do_init=False`. "
+                "You must call `init_weights` manually and store the params outside of the model and "
+                "pass it explicitly where needed."
+            )
+        return self._params
+
+    @property
+    def required_params(self) -> Set:
+        return self._required_params
+
+    @property
+    def params_shape_tree(self) -> Dict:
+        return self._params_shape_tree
+
+    @params.setter
+    def params(self, params: Union[Dict, FrozenDict]):
+        # don't set params if the model is not initialized
+        if not self._is_initialized:
+            raise ValueError(
+                "`params` cannot be set from model when the model is created with `_do_init=False`. "
+                "You should store the params outside of the model."
+            )
+
+        if isinstance(params, FrozenDict):
+            params = unfreeze(params)
+        param_keys = set(flatten_dict(params).keys())
+        if len(self.required_params - param_keys) > 0:
+            raise ValueError(
+                "Some parameters are missing. Make sure that `params` include the following "
+                f"parameters {self.required_params - param_keys}"
+            )
+        self._params = params
+
+    def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
+        """
+        Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
+        """
+
+        # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
+        def conditional_cast(param):
+            if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
+                param = param.astype(dtype)
+            return param
+
+        if mask is None:
+            return jax.tree_util.tree_map(conditional_cast, params)
+
+        flat_params = flatten_dict(params)
+        flat_mask, _ = jax.tree_util.tree_flatten(mask)
+
+        for masked, key in zip(flat_mask, flat_params.keys()):
+            if masked:
+                param = flat_params[key]
+                flat_params[key] = conditional_cast(param)
+
+        return unflatten_dict(flat_params)
+
+    def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
+        r"""
+        Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
+        the `params` in place.
+ + This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full + half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed. + + Arguments: + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + mask (`Union[Dict, FrozenDict]`): + A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params + you want to cast, and should be `False` for those you want to skip. + + Examples: + + ```python + >>> from transformers import FlaxBertModel + + >>> # load model + >>> model = FlaxBertModel.from_pretrained("bert-base-cased") + >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision + >>> model.params = model.to_bf16(model.params) + >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale) + >>> # then pass the mask as follows + >>> from flax import traverse_util + + >>> model = FlaxBertModel.from_pretrained("bert-base-cased") + >>> flat_params = traverse_util.flatten_dict(model.params) + >>> mask = { + ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) + ... for path in flat_params + ... } + >>> mask = traverse_util.unflatten_dict(mask) + >>> model.params = model.to_bf16(model.params, mask) + ```""" + return self._cast_floating_to(params, jnp.bfloat16, mask) + + def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None): + r""" + Cast the floating-point `parmas` to `jax.numpy.float32`. This method can be used to explicitly convert the + model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place. + + Arguments: + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + mask (`Union[Dict, FrozenDict]`): + A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params + you want to cast, and should be `False` for those you want to skip + + Examples: + + ```python + >>> from transformers import FlaxBertModel + + >>> # Download model and configuration from huggingface.co + >>> model = FlaxBertModel.from_pretrained("bert-base-cased") + >>> # By default, the model params will be in fp32, to illustrate the use of this method, + >>> # we'll first cast to fp16 and back to fp32 + >>> model.params = model.to_f16(model.params) + >>> # now cast back to fp32 + >>> model.params = model.to_fp32(model.params) + ```""" + return self._cast_floating_to(params, jnp.float32, mask) + + def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None): + r""" + Cast the floating-point `parmas` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the + `params` in place. + + This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full + half-precision training or to save weights in float16 for inference in order to save memory and improve speed. + + Arguments: + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + mask (`Union[Dict, FrozenDict]`): + A `PyTree` with same structure as the `params` tree. 
+                you want to cast, and `False` for those you want to skip.
+
+        Examples:
+
+        ```python
+        >>> from transformers import FlaxBertModel
+
+        >>> # load model
+        >>> model = FlaxBertModel.from_pretrained("bert-base-cased")
+        >>> # By default, the model params will be in fp32, to cast these to float16
+        >>> model.params = model.to_fp16(model.params)
+        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
+        >>> # then pass the mask as follows
+        >>> from flax import traverse_util
+
+        >>> model = FlaxBertModel.from_pretrained("bert-base-cased")
+        >>> flat_params = traverse_util.flatten_dict(model.params)
+        >>> mask = {
+        ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
+        ...     for path in flat_params
+        ... }
+        >>> mask = traverse_util.unflatten_dict(mask)
+        >>> model.params = model.to_fp16(model.params, mask)
+        ```"""
+        return self._cast_floating_to(params, jnp.float16, mask)
+
+    @classmethod
+    def load_flax_weights(cls, resolved_archive_file):
+        try:
+            if resolved_archive_file.endswith(".safetensors"):
+                state = safe_load_file(resolved_archive_file)
+                state = unflatten_dict(state, sep=".")
+            else:
+                with open(resolved_archive_file, "rb") as state_f:
+                    state = from_bytes(cls, state_f.read())
+        except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
+            try:
+                with open(resolved_archive_file) as f:
+                    if f.read().startswith("version"):
+                        raise OSError(
+                            "You seem to have cloned a repository without having git-lfs installed. Please"
+                            " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
+                            " folder you cloned."
+                        )
+                    else:
+                        raise ValueError from e
+            except (UnicodeDecodeError, ValueError):
+                raise EnvironmentError(f"Unable to convert {resolved_archive_file} to Flax deserializable object. ")
+
+        return state
+
+    @classmethod
+    def load_flax_sharded_weights(cls, shard_files):
+        """
+        This is the same as [`flax.serialization.from_bytes`]
+        (https://flax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded
+        checkpoint.
+
+        This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
+        loaded in the model.
+
+        Args:
+            shard_files (`List[str]`):
+                The list of shard files to load.
+
+        Returns:
+            `Dict`: A nested dictionary of the model parameters, in the expected format for flax models: `{'model':
+            {'params': {'...'}}}`.
+        """
+
+        # Load the index
+        state_sharded_dict = {}
+
+        for shard_file in shard_files:
+            # load using msgpack utils
+            try:
+                with open(shard_file, "rb") as state_f:
+                    state = from_bytes(cls, state_f.read())
+            except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
+                with open(shard_file) as f:
+                    if f.read().startswith("version"):
+                        raise OSError(
+                            "You seem to have cloned a repository without having git-lfs installed. Please"
+                            " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
+                            " folder you cloned."
+                        )
+                    else:
+                        raise ValueError from e
+            except (UnicodeDecodeError, ValueError):
+                raise EnvironmentError(f"Unable to convert {shard_file} to Flax deserializable object. ")
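+
+            # (Each shard is flattened with "/" separators before being merged below, so parameter keys from
+            # different shards can coexist in one dict regardless of nesting depth.)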
") + + state = flatten_dict(state, sep="/") + state_sharded_dict.update(state) + del state + gc.collect() + + # the state dict is unflattened to the match the format of model.params + return unflatten_dict(state_sharded_dict, sep="/") + + @classmethod + def can_generate(cls) -> bool: + """ + Returns whether this model can generate sequences with `.generate()`. Returns: + `bool`: Whether this model can generate sequences with `.generate()`. + """ + # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. + # Alternativelly, the model can also have a custom `generate` function. + if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): + return False + return True + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + dtype: jnp.dtype = jnp.float32, + *model_args, + config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + ignore_mismatched_sizes: bool = False, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + **kwargs, + ): + r""" + Instantiate a pretrained flax model from a pre-trained model configuration. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing model weights saved using + [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *pt index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, + `from_pt` should be set to `True`. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. + model_args (sequence of positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): + Can be either: + + - an instance of a class derived from [`PretrainedConfig`], + - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. + + Configuration for the model to use instead of an automatically loaded configuration. 
+                be automatically loaded when:
+
+                - The model is a model provided by the library (loaded with the *model id* string of a pretrained
+                  model).
+                - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
+                  save directory.
+                - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
+                  configuration JSON file named *config.json* is found in the directory.
+            cache_dir (`Union[str, os.PathLike]`, *optional*):
+                Path to a directory in which a downloaded pretrained model configuration should be cached if the
+                standard cache should not be used.
+            from_pt (`bool`, *optional*, defaults to `False`):
+                Load the model weights from a PyTorch checkpoint save file (see docstring of
+                `pretrained_model_name_or_path` argument).
+            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
+                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
+                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
+                checkpoint with 3 labels).
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+                cached versions if they exist.
+            resume_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to attempt to resume downloading an incompletely received file, rather than deleting it
+                and starting over.
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            local_files_only (`bool`, *optional*, defaults to `False`):
+                Whether or not to only look at local files (i.e., do not try to download the model).
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+                identifier allowed by git.
+
+                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
+
+            subfolder (`str`, *optional*, defaults to `""`):
+                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+                specify the folder name here.
+            kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initialize the model
+                (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
+                automatically loaded:
+
+                - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
+                  underlying model's `__init__` method (we assume all relevant updates to the configuration have
+                  already been done)
+                - If a configuration is not provided, `kwargs` will be first passed to the configuration class
+                  initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
+                  corresponds to a configuration attribute will be used to override said attribute with the
+                  supplied `kwargs` value.
Remaining keys that do not correspond to any configuration attribute + will be passed to the underlying model's `__init__` function. + + Examples: + + ```python + >>> from transformers import BertConfig, FlaxBertModel + + >>> # Download model and configuration from huggingface.co and cache. + >>> model = FlaxBertModel.from_pretrained("bert-base-cased") + >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). + >>> model = FlaxBertModel.from_pretrained("./test/saved_model/") + >>> # Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). + >>> config = BertConfig.from_json_file("./pt_model/config.json") + >>> model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config) + ```""" + from_pt = kwargs.pop("from_pt", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + use_auth_token = kwargs.pop("use_auth_token", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + _do_init = kwargs.pop("_do_init", True) + subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) + + # Not relevant for Flax Models + _ = kwargs.pop("adapter_kwargs", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if trust_remote_code is True: + logger.warning( + "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" + " ignored." + ) + + user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Load config if we don't provide a configuration + if not isinstance(config, PretrainedConfig): + config_path = config if config is not None else pretrained_model_name_or_path + config, model_kwargs = cls.config_class.from_pretrained( + config_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + _commit_hash=commit_hash, + **kwargs, + ) + else: + model_kwargs = kwargs.copy() + + if commit_hash is None: + commit_hash = getattr(config, "_commit_hash", None) + + # Add the dtype to model_kwargs + model_kwargs["dtype"] = dtype + + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the + # index of the files. 
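+        # (For orientation, assuming the standard Hugging Face file naming: a sharded Flax checkpoint is a set of
+        # `*.msgpack` shard files plus a JSON index, `FLAX_WEIGHTS_INDEX_NAME`, mapping each parameter to its shard.)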
+ is_sharded = False + + # Load model + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isdir(pretrained_model_name_or_path): + if os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): + # Load from a Flax checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)): + # Load from a sharded Flax checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME) + is_sharded = True + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) + elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): + # Load from a PyTorch checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME) + elif from_pt and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME) + ): + # Load from a sharded pytorch checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME) + is_sharded = True + # At this stage we don't have a weight file so we will raise an error. + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + is_sharded = True + raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!") + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " + "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " + "weights." + ) + else: + raise EnvironmentError( + f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " + f"{pretrained_model_name_or_path}." + ) + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): + archive_file = pretrained_model_name_or_path + is_local = True + elif is_remote_url(pretrained_model_name_or_path): + filename = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) + else: + if from_pt: + filename = WEIGHTS_NAME + else: + filename = FLAX_WEIGHTS_NAME + + try: + # Load from URL or cache if already cached + cached_file_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "resume_download": resume_download, + "local_files_only": local_files_only, + "token": token, + "user_agent": user_agent, + "revision": revision, + "subfolder": subfolder, + "_raise_exceptions_for_missing_entries": False, + "_commit_hash": commit_hash, + } + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) + + # Maybe the checkpoint is sharded, we try to grab the index name in this case. 
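+                    # (`cached_file` returns None here rather than raising, because
+                    # `_raise_exceptions_for_missing_entries` was set to False in `cached_file_kwargs` above.)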
+                    if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME:
+                        resolved_archive_file = cached_file(
+                            pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs
+                        )
+                        if resolved_archive_file is not None:
+                            is_sharded = True
+
+                    # Maybe the checkpoint is pytorch sharded, we try to grab the pytorch index name in this case.
+                    if resolved_archive_file is None and from_pt:
+                        resolved_archive_file = cached_file(
+                            pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs
+                        )
+                        if resolved_archive_file is not None:
+                            is_sharded = True
+
+                    # If we still haven't found anything, look for `safetensors`.
+                    if resolved_archive_file is None:
+                        # No support for sharded safetensors yet, so we'll raise an error if that's all we find.
+                        filename = SAFE_WEIGHTS_NAME
+                        resolved_archive_file = cached_file(
+                            pretrained_model_name_or_path, SAFE_WEIGHTS_NAME, **cached_file_kwargs
+                        )
+
+                    # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
+                    # result when the internet is up, the repo and revision exist, but the file does not.
+                    if resolved_archive_file is None:
+                        # Otherwise, maybe there is a TF or Torch model file. We try those to give a helpful error
+                        # message.
+                        has_file_kwargs = {
+                            "revision": revision,
+                            "proxies": proxies,
+                            "token": token,
+                        }
+                        if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs):
+                            is_sharded = True
+                            raise NotImplementedError(
+                                "Support for sharded checkpoints using safetensors is coming soon!"
+                            )
+                        elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
+                            raise EnvironmentError(
+                                f"{pretrained_model_name_or_path} does not appear to have a file named"
+                                f" {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to"
+                                " load this model from those weights."
+                            )
+                        elif has_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **has_file_kwargs):
+                            raise EnvironmentError(
+                                f"{pretrained_model_name_or_path} does not appear to have a file named"
+                                f" {FLAX_WEIGHTS_INDEX_NAME} but there is a sharded file for PyTorch weights. Use"
+                                " `from_pt=True` to load this model from those weights."
+                            )
+                        else:
+                            raise EnvironmentError(
+                                f"{pretrained_model_name_or_path} does not appear to have a file named"
+                                f" {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
+                            )
+                except EnvironmentError:
+                    # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
+                    # to the original exception.
+                    raise
+                except Exception:
+                    # For any other exception, we throw a generic error.
+                    raise EnvironmentError(
+                        f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
+                        " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
+                        f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
+                        f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
+                    )
+
+            if is_local:
+                logger.info(f"loading weights file {archive_file}")
+                resolved_archive_file = archive_file
+                filename = resolved_archive_file.split(os.path.sep)[-1]
+            else:
+                logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
+        else:
+            resolved_archive_file = None
+
+        # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
+        if is_sharded:
+            # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
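+            # (The shard index metadata returned by `get_checkpoint_shard_files` is discarded here; only the resolved
+            # shard paths are kept, and `load_flax_sharded_weights` later merges the shards back into one dict.)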
+            resolved_archive_file, _ = get_checkpoint_shard_files(
+                pretrained_model_name_or_path,
+                resolved_archive_file,
+                cache_dir=cache_dir,
+                force_download=force_download,
+                proxies=proxies,
+                resume_download=resume_download,
+                local_files_only=local_files_only,
+                token=token,
+                user_agent=user_agent,
+                revision=revision,
+                subfolder=subfolder,
+                _commit_hash=commit_hash,
+            )
+
+        safetensors_from_pt = False
+        if filename == SAFE_WEIGHTS_NAME:
+            with safe_open(resolved_archive_file, framework="flax") as f:
+                safetensors_metadata = f.metadata()
+            if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax"]:
+                raise OSError(
+                    f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata."
+                    " Make sure you save your model with the `save_pretrained` method."
+                )
+            safetensors_from_pt = safetensors_metadata.get("format") == "pt"
+
+        # init random models
+        model = cls(config, *model_args, _do_init=_do_init, **model_kwargs)
+
+        if from_pt or safetensors_from_pt:
+            state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded)
+        else:
+            if is_sharded:
+                state = cls.load_flax_sharded_weights(resolved_archive_file)
+            else:
+                state = cls.load_flax_weights(resolved_archive_file)
+            # make sure all arrays are stored as jnp.arrays
+            # NOTE: This is to prevent a bug that will be fixed in Flax >= v0.3.4:
+            # https://github.com/google/flax/issues/1261
+            if _do_init:
+                state = jax.tree_util.tree_map(jnp.array, state)
+            else:
+                # keep the params on CPU if we don't want to initialize
+                state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state)
+
+        if "batch_stats" in state:  # if flax model contains batch norm layers
+            # if model is base model only use model_prefix key
+            if (
+                cls.base_model_prefix not in dict(model.params_shape_tree["params"])
+                and cls.base_model_prefix in state["params"]
+            ):
+                state["params"] = state["params"][cls.base_model_prefix]
+                state["batch_stats"] = state["batch_stats"][cls.base_model_prefix]
+
+            # if model is head model and we are loading weights from base model
+            # we initialize new params dict with base_model_prefix
+            if (
+                cls.base_model_prefix in dict(model.params_shape_tree["params"])
+                and cls.base_model_prefix not in state["params"]
+            ):
+                state = {
+                    "params": {cls.base_model_prefix: state["params"]},
+                    "batch_stats": {cls.base_model_prefix: state["batch_stats"]},
+                }
+
+        else:
+            # if model is base model only use model_prefix key
+            if cls.base_model_prefix not in dict(model.params_shape_tree) and cls.base_model_prefix in state:
+                state = state[cls.base_model_prefix]
+
+            # if model is head model and we are loading weights from base model
+            # we initialize new params dict with base_model_prefix
+            if cls.base_model_prefix in dict(model.params_shape_tree) and cls.base_model_prefix not in state:
+                state = {cls.base_model_prefix: state}
+
+        # flatten dicts
+        state = flatten_dict(state)
+
+        random_state = flatten_dict(unfreeze(model.params if _do_init else model.params_shape_tree))
+
+        missing_keys = model.required_params - set(state.keys())
+        unexpected_keys = set(state.keys()) - model.required_params
+
+        # Disabling warning when porting pytorch weights to flax, flax does not use num_batches_tracked
+        for unexpected_key in unexpected_keys.copy():
+            if "num_batches_tracked" in unexpected_key[-1]:
+                unexpected_keys.remove(unexpected_key)
+
+        if missing_keys and not _do_init:
+            logger.warning(
+                f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
+                "Make sure to call model.init_weights to initialize the missing weights."
+            )
+            cls._missing_keys = missing_keys
+
+        # Mismatched keys contain tuples (key, shape1, shape2) of weights in the checkpoint that have a shape not
+        # matching the weights in the model.
+        mismatched_keys = []
+        for key in state.keys():
+            if key in random_state and state[key].shape != random_state[key].shape:
+                if ignore_mismatched_sizes:
+                    mismatched_keys.append((key, state[key].shape, random_state[key].shape))
+                    state[key] = random_state[key]
+                else:
+                    raise ValueError(
+                        f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
+                        f"{state[key].shape} which is incompatible with the model shape {random_state[key].shape}. "
+                        "Use `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this "
+                        "model."
+                    )
+
+        # add missing keys as random parameters if we are initializing
+        if missing_keys and _do_init:
+            for missing_key in missing_keys:
+                state[missing_key] = random_state[missing_key]
+
+        # remove unexpected keys to not be saved again
+        for unexpected_key in unexpected_keys:
+            del state[unexpected_key]
+
+        if len(unexpected_keys) > 0:
+            logger.warning(
+                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
+                f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
+                f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
+                " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
+                " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
+                f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
+                " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
+            )
+        else:
+            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
+
+        if len(missing_keys) > 0:
+            logger.warning(
+                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
+                f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
+                " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
+            )
+        elif len(mismatched_keys) == 0:
+            logger.info(
+                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
+                f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
+                f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
+                " training."
+            )
+        if len(mismatched_keys) > 0:
+            mismatched_warning = "\n".join(
+                [
+                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
+                    for key, shape1, shape2 in mismatched_keys
+                ]
+            )
+            logger.warning(
+                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
+                f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
+                f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
+                " to use it for predictions and inference."
+            )
+
+        # dictionary of key: dtypes for the model params
+        param_dtypes = jax.tree_util.tree_map(lambda x: x.dtype, state)
+        # extract keys of parameters not in jnp.float32
+        fp16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.float16]
+        bf16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.bfloat16]
+
+        # raise a warning if any of the parameters are not in jnp.float32
+        if len(fp16_params) > 0:
+            logger.warning(
+                f"Some of the weights of {model.__class__.__name__} were initialized in float16 precision from "
+                f"the model checkpoint at {pretrained_model_name_or_path}:\n{fp16_params}\n"
+                "You should probably UPCAST the model weights to float32 if this was not intended. "
+                "See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this."
+            )
+
+        if len(bf16_params) > 0:
+            logger.warning(
+                f"Some of the weights of {model.__class__.__name__} were initialized in bfloat16 precision from "
+                f"the model checkpoint at {pretrained_model_name_or_path}:\n{bf16_params}\n"
+                "You should probably UPCAST the model weights to float32 if this was not intended. "
+                "See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this."
+            )
+
+        # If it is a model with generation capabilities, attempt to load the generation config
+        if model.can_generate():
+            try:
+                model.generation_config = GenerationConfig.from_pretrained(
+                    pretrained_model_name_or_path,
+                    cache_dir=cache_dir,
+                    force_download=force_download,
+                    resume_download=resume_download,
+                    proxies=proxies,
+                    local_files_only=local_files_only,
+                    token=token,
+                    revision=revision,
+                    subfolder=subfolder,
+                    _from_auto=from_auto_class,
+                    _from_pipeline=from_pipeline,
+                    **kwargs,
+                )
+            except OSError:
+                logger.info(
+                    "Generation config file not found, using a generation config created from the model config."
+                )
+                pass
+
+        if _do_init:
+            # set correct parameters
+            model.params = unflatten_dict(state)
+            return model
+        else:
+            return model, unflatten_dict(state)
+
+    def save_pretrained(
+        self,
+        save_directory: Union[str, os.PathLike],
+        params=None,
+        push_to_hub=False,
+        max_shard_size="10GB",
+        token: Optional[Union[str, bool]] = None,
+        safe_serialization: bool = False,
+        **kwargs,
+    ):
+        """
+        Save a model and its configuration file to a directory, so that it can be re-loaded using the
+        [`~FlaxPreTrainedModel.from_pretrained`] class method.
+
+        Arguments:
+            save_directory (`str` or `os.PathLike`):
+                Directory to which to save. Will be created if it doesn't exist.
+            push_to_hub (`bool`, *optional*, defaults to `False`):
+                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+                namespace).
+            max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
+                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
+                lower than this limit. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
+
+                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint
+                shard, which will be bigger than `max_shard_size`.
+
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+            safe_serialization (`bool`, *optional*, defaults to `False`):
+                Whether to save the model using `safetensors` or through msgpack.
+        """
+        use_auth_token = kwargs.pop("use_auth_token", None)
+
+        if use_auth_token is not None:
+            warnings.warn(
+                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+                FutureWarning,
+            )
+            if token is not None:
+                raise ValueError(
+                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+                )
+            token = use_auth_token
+
+        if token is not None:
+            kwargs["token"] = token
+
+        if os.path.isfile(save_directory):
+            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+            return
+
+        os.makedirs(save_directory, exist_ok=True)
+
+        if push_to_hub:
+            commit_message = kwargs.pop("commit_message", None)
+            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+            repo_id = self._create_repo(repo_id, **kwargs)
+            files_timestamps = self._get_files_timestamps(save_directory)
+
+        # get abs dir
+        save_directory = os.path.abspath(save_directory)
+        # save config as well
+        self.config.architectures = [self.__class__.__name__[4:]]
+
+        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
+        # loaded from the Hub.
+        if self._auto_class is not None:
+            custom_object_save(self, save_directory, config=self.config)
+
+        self.config.save_pretrained(save_directory)
+        if self.can_generate():
+            self.generation_config.save_pretrained(save_directory)
+
+        # save model
+        weights_name = SAFE_WEIGHTS_NAME if safe_serialization else FLAX_WEIGHTS_NAME
+        output_model_file = os.path.join(save_directory, weights_name)
+
+        shards, index = flax_shard_checkpoint(params if params is not None else self.params, max_shard_size)
+        # Clean the folder from a previous save
+        for filename in os.listdir(save_directory):
+            full_filename = os.path.join(save_directory, filename)
+            weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
+            if (
+                filename.startswith(weights_no_suffix)
+                and os.path.isfile(full_filename)
+                and filename not in shards.keys()
+            ):
+                os.remove(full_filename)
+
+        if index is None:
+            if safe_serialization:
+                params = params if params is not None else self.params
+                flat_dict = flatten_dict(params, sep=".")
+                safe_save_file(flat_dict, output_model_file, metadata={"format": "flax"})
+            else:
+                with open(output_model_file, "wb") as f:
+                    params = params if params is not None else self.params
+                    model_bytes = to_bytes(params)
+                    f.write(model_bytes)
+
+        else:
+            save_index_file = os.path.join(save_directory, FLAX_WEIGHTS_INDEX_NAME)
+            # Save the index as well
+            with open(save_index_file, "w", encoding="utf-8") as f:
+                content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+                f.write(content)
+            logger.info(
+                f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
+                f"split in {len(shards)} checkpoint shards. You can find where each parameter has been saved in the "
+                f"index located at {save_index_file}."
+            )
+            for shard_file, shard in shards.items():
+                # each shard is a dict flattened with "/" separators; unflatten it before serializing
+                with open(os.path.join(save_directory, shard_file), mode="wb") as f:
+                    params = unflatten_dict(shard, sep="/")
+                    shard_bytes = to_bytes(params)
+                    f.write(shard_bytes)
+
+        logger.info(f"Model weights saved in {output_model_file}")
+
+        if push_to_hub:
+            self._upload_modified_files(
+                save_directory,
+                repo_id,
+                files_timestamps,
+                commit_message=commit_message,
+                token=token,
+            )
+
+    @classmethod
+    def register_for_auto_class(cls, auto_class="FlaxAutoModel"):
+        """
+        Register this class with a given auto class. This should only be used for custom models as the ones in the
+        library are already mapped with an auto class.
+
+        This API is experimental and may have some slight breaking changes in the next releases.
+
+        Args:
+            auto_class (`str` or `type`, *optional*, defaults to `"FlaxAutoModel"`):
+                The auto class to register this new model with.
+        """
+        if not isinstance(auto_class, str):
+            auto_class = auto_class.__name__
+
+        import transformers.models.auto as auto_module
+
+        if not hasattr(auto_module, auto_class):
+            raise ValueError(f"{auto_class} is not a valid auto class.")
+
+        cls._auto_class = auto_class
+
+
+# To update the docstring, we need to copy the method, otherwise we change the original docstring.
+FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub)
+if FlaxPreTrainedModel.push_to_hub.__doc__ is not None:
+    FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(
+        object="model", object_class="FlaxAutoModel", object_files="model checkpoint"
+    )
+
+
+def overwrite_call_docstring(model_class, docstring):
+    # copy __call__ function to be sure docstring is changed only for this function
+    model_class.__call__ = copy_func(model_class.__call__)
+    # delete existing docstring
+    model_class.__call__.__doc__ = None
+    # set correct docstring
+    model_class.__call__ = add_start_docstrings_to_model_forward(docstring)(model_class.__call__)
+
+
+def append_call_sample_docstring(
+    model_class, checkpoint, output_type, config_class, mask=None, revision=None, real_checkpoint=None
+):
+    model_class.__call__ = copy_func(model_class.__call__)
+    model_class.__call__ = add_code_sample_docstrings(
+        checkpoint=checkpoint,
+        output_type=output_type,
+        config_class=config_class,
+        model_cls=model_class.__name__,
+        revision=revision,
+        real_checkpoint=real_checkpoint,
+    )(model_class.__call__)
+
+
+def append_replace_return_docstrings(model_class, output_type, config_class):
+    model_class.__call__ = copy_func(model_class.__call__)
+    model_class.__call__ = replace_return_docstrings(
+        output_type=output_type,
+        config_class=config_class,
+    )(model_class.__call__)
diff --git a/modified/modeling_outputs.py b/modified/modeling_outputs.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbee6a292b531b7a279fa178f589008769e6c06f
--- /dev/null
+++ b/modified/modeling_outputs.py
@@ -0,0 +1,1753 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch + +from .utils import ModelOutput + + +@dataclass +class BaseModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithNoAttention(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithPooling(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndNoAttention(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state after a pooling operation on the spatial dimensions. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithCrossAttentions(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. 
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. 
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoECausalLMOutputWithPast(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden + states terms, to train a MoE model. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + z_loss for the sparse modules. + aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + aux_loss for the sparse modules. + router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse + modules. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + z_loss: torch.FloatTensor = None + aux_loss: torch.FloatTensor = None + router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoEModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary + loss and the z_loss for Mixture of Experts models. 
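+
+    Example (a minimal, hypothetical sketch of how these fields are consumed; the model construction is omitted and
+    the `output_router_probs` flag is assumed to be supported by the calling model, per the description above):
+
+    ```python
+    >>> outputs = model(input_ids, output_router_probs=True)  # hypothetical MoE model call
+    >>> probs = outputs.router_probs  # tuple: one (batch_size, sequence_length, num_experts) tensor per layer
+    ```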
+ """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + router_probs: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoeModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Raw router logtis (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary + loss for Mixture of Experts models. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoeCausalLMOutputWithPast(ModelOutput): + """ + Base class for causal language model (or autoregressive) with mixture of experts outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + + aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + aux_loss for the sparse modules. 
+
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Router logits (post-softmax) computed by the MoE routers; these terms are used to compute the auxiliary
+ loss for Mixture of Experts models.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ aux_loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ router_logits: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class MoEModelOutputWithPastAndCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs that may also contain past key/values (to speed up sequential decoding), as
+ well as the Mixture of Experts routers' hidden-state terms used to train a MoE model.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
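+
+ An illustrative sketch of the per-layer cache layout described above (`out` is a hypothetical
+ output instance, not an object defined in this file):
+
+ ```python
+ # Each layer entry holds 2 self-attention tensors, plus 2 cross-attention
+ # tensors when config.is_encoder_decoder=True.
+ layer_cache = out.past_key_values[0]
+ self_key, self_value = layer_cache[0], layer_cache[1]        # (batch, heads, seq_len, head_dim)
+ if len(layer_cache) == 4:
+     cross_key, cross_value = layer_cache[2], layer_cache[3]  # (batch, heads, enc_len, head_dim)
+ ```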
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Raw router probabilities that are computed by MoE routers; these terms are used to compute the auxiliary
+ loss and the z_loss for Mixture of Experts models.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ router_probs: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class Seq2SeqModelOutput(ModelOutput):
+ """
+ Base class for encoder-decoder model outputs that also contains pre-computed hidden states that can speed up
+ sequential decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class Seq2SeqMoEModelOutput(ModelOutput):
+ """
+ Base class for encoder-decoder model outputs that also contains pre-computed hidden states that can speed up
+ sequential decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
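+
+ A brief sketch of the cached-decoding shape noted above (all names here are illustrative
+ placeholders, not objects defined in this file):
+
+ ```python
+ # With a cache, the decoder consumes one new position per step, so the
+ # returned hidden state covers only that position.
+ out = model(decoder_input_ids=next_token, encoder_outputs=enc_out,
+             past_key_values=cache, use_cache=True)
+ assert out.last_hidden_state.shape[1] == 1  # (batch_size, 1, hidden_size)
+ ```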
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
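+
+ Because these weights are post-softmax, each attention row is a probability distribution; a quick
+ illustrative check (editor's sketch; `out` is a hypothetical output instance):
+
+ ```python
+ import torch
+
+ att = out.encoder_attentions[0]  # (batch_size, num_heads, seq_len, seq_len)
+ row_sums = att.sum(dim=-1)       # each row should sum to 1, up to float error
+ assert torch.allclose(row_sums, torch.ones_like(row_sums), atol=1e-4)
+ ```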
+ encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse
+ modules.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class CausalLMOutput(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class CausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
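+
+ An illustrative property of this tuple-style cache (editor's sketch; `model`, `out`, and
+ `next_token` are hypothetical placeholders):
+
+ ```python
+ # The cached key/value tensors grow by one position per decoded token.
+ before = out.past_key_values[0][0].shape[2]
+ out = model(next_token, past_key_values=out.past_key_values, use_cache=True)
+ assert out.past_key_values[0][0].shape[2] == before + 1
+ ```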
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class CausalLMOutputWithCrossAttentions(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Cross-attention weights after the attention softmax, used to compute the weighted average in the
+ cross-attention heads.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `torch.FloatTensor` tuples of length `config.n_layers`, with each tuple containing the cached key,
+ value states of the self-attention and the cross-attention layers if the model is used in an encoder-decoder
+ setting. Only relevant if `config.is_decoder = True`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class SequenceClassifierOutputWithPast(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MaskedLMOutput(ModelOutput): + """ + Base class for masked language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Masked language modeling (MLM) loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class Seq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class Seq2SeqMoEOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ encoder_z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
+ z_loss for the sparse modules of the encoder.
+ decoder_z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
+ z_loss for the sparse modules of the decoder.
+ encoder_aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
+ aux_loss for the sparse modules of the encoder.
+ decoder_aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
+ aux_loss for the sparse modules of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
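+
+ One common z-loss formulation over such router logits (an editor's sketch in the Switch
+ Transformers style, not necessarily the exact computation of any particular model):
+
+ ```python
+ import torch
+
+ def router_z_loss(router_logits: torch.Tensor) -> torch.Tensor:
+     # router_logits: (batch_size, sequence_length, num_experts)
+     z = torch.logsumexp(router_logits, dim=-1)
+     return (z ** 2).mean()  # penalizes large logits to keep routing numerically stable
+ ```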
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Router logits of the encoder model, useful to compute the auxiliary loss and z_loss for Mixture of Experts
+ models.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ encoder_z_loss: torch.FloatTensor = None
+ decoder_z_loss: torch.FloatTensor = None
+ encoder_aux_loss: torch.FloatTensor = None
+ decoder_aux_loss: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class NextSentencePredictorOutput(ModelOutput):
+ """
+ Base class for outputs of models predicting if two sentences are consecutive or not.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided):
+ Next sequence prediction (classification) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class SequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class Seq2SeqSequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sequence-to-sequence sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class MultipleChoiceModelOutput(ModelOutput):
+ """
+ Base class for outputs of multiple choice models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class QuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of question answering models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
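+
+ A minimal span-extraction sketch from these logits (editor's illustration; ties and invalid
+ start/end orderings are ignored here):
+
+ ```python
+ # Pick the most likely start and end token positions independently.
+ start = out.start_logits.argmax(dim=-1)  # (batch_size,)
+ end = out.end_logits.argmax(dim=-1)      # (batch_size,)
+ answer_ids = input_ids[0, start[0] : end[0] + 1]
+ ```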
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of sequence-to-sequence question answering models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class SemanticSegmenterOutput(ModelOutput):
+ """
+ Base class for outputs of semantic segmentation models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+ <Tip warning={true}>
+
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
+ to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
+ original image size as post-processing. You should always check your logits shape and resize as needed.
+
+ </Tip>
+
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class ImageClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of image classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
+ (also called feature maps) of the model at the output of each stage.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class ImageClassifierOutputWithNoAttention(ModelOutput):
+ """
+ Base class for outputs of image classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
+ called feature maps) of the model at the output of each stage.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class DepthEstimatorOutput(ModelOutput):
+ """
+ Base class for outputs of depth estimation models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`):
+ Predicted depth for each pixel.
+
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
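+
+ Since `predicted_depth` (like the segmentation logits above) may be smaller than the input image,
+ a common post-processing step is to upsample it (editor's sketch; `image_height` and `image_width`
+ are placeholders for the original image size):
+
+ ```python
+ import torch
+
+ depth = torch.nn.functional.interpolate(
+     out.predicted_depth.unsqueeze(1),   # add a channel dim: (batch, 1, H, W)
+     size=(image_height, image_width),
+     mode="bicubic",
+     align_corners=False,
+ ).squeeze(1)
+ ```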
+ """ + + loss: Optional[torch.FloatTensor] = None + predicted_depth: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class ImageSuperResolutionOutput(ModelOutput): + """ + Base class for outputs of image super resolution models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Reconstruction loss. + reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed images, possibly upscaled. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class Wav2Vec2BaseModelOutput(ModelOutput): + """ + Base class for models that have been trained with the Wav2Vec2 loss objective. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): + Sequence of extracted feature vectors of the last convolutional layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + extract_features: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class XVectorOutput(ModelOutput): + """ + Output type of [`Wav2Vec2ForXVector`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): + Classification hidden states before AMSoftmax. 
+ embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): + Utterance embeddings used for vector similarity-based retrieval. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + embeddings: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BackboneOutput(ModelOutput): + """ + Base class for outputs of backbones. + + Args: + feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`): + Feature maps of the stages. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, num_channels, height, width)`, + depending on the backbone. + + Hidden-states of the model at the output of each stage plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Only applicable if the backbone uses attention. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + feature_maps: Tuple[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndProjection(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ projection_state (`tuple(torch.FloatTensor)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` of shape `(batch_size, config.project_dim)`.
+
+ Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ pooler_output: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ projection_state: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ @dataclass
+ class Seq2SeqSpectrogramOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence spectrogram outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Spectrogram generation loss.
+ spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
+ The predicted spectrogram.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ spectrogram: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ @dataclass
+ class Seq2SeqTSModelOutput(ModelOutput):
+ """
+ Base class for a time series model's encoder outputs that also contain pre-computed hidden states that can speed up
+ sequential decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used, only the last hidden-state of the sequences, of shape `(batch_size, 1,
+ hidden_size)`, is output.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+ Shift values of each time series' context window, which are used to give the model inputs of the same
+ magnitude and are then used to shift back to the original magnitude.
+ scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+ Scaling values of each time series' context window, which are used to give the model inputs of the same
+ magnitude and are then used to rescale back to the original magnitude.
+ static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*):
+ Static features of each time series in a batch, which are copied to the covariates at inference time.
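+
+ Example (an illustrative sketch of how `loc` and `scale` map normalized values back to the original magnitude; the numbers are made up):
+
+ ```py
+ >>> import torch
+ >>> loc = torch.tensor([[10.0]])  # per-series shift
+ >>> scale = torch.tensor([[2.0]])  # per-series scale
+ >>> normalized = torch.tensor([[0.5, 1.0, -0.5]])
+ >>> normalized * scale + loc  # back to the original magnitude
+ tensor([[11., 12.,  9.]])
+ ```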
+ """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + loc: Optional[torch.FloatTensor] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class Seq2SeqTSPredictionOutput(ModelOutput): + """ + Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the + chosen distribution. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): + Distributional loss. + params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): + Parameters of the chosen distribution. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+ Shift values of each time series' context window, which are used to give the model inputs of the same
+ magnitude and are then used to shift back to the original magnitude.
+ scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+ Scaling values of each time series' context window, which are used to give the model inputs of the same
+ magnitude and are then used to rescale back to the original magnitude.
+ static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*):
+ Static features of each time series in a batch, which are copied to the covariates at inference time.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ params: Optional[Tuple[torch.FloatTensor]] = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loc: Optional[torch.FloatTensor] = None
+ scale: Optional[torch.FloatTensor] = None
+ static_features: Optional[torch.FloatTensor] = None
+
+
+ @dataclass
+ class SampleTSPredictionOutput(ModelOutput):
+ """
+ Base class for a time series model's prediction outputs that contain the sampled values from the chosen
+ distribution.
+
+ Args:
+ sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`):
+ Sampled values from the chosen distribution.
+ """
+
+ sequences: torch.FloatTensor = None
+
+
+ @dataclass
+ class MaskedImageModelingOutput(ModelOutput):
+ """
+ Base class for outputs of masked image completion / in-painting models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
+ Reconstruction loss.
+ reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Reconstructed / completed images.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
+ when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when + `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + @property + def logits(self): + warnings.warn( + "logits attribute is deprecated and will be removed in version 5 of Transformers." + " Please use the reconstruction attribute to retrieve the final output instead.", + FutureWarning, + ) + return self.reconstruction diff --git a/modified/modeling_tf_utils.py b/modified/modeling_tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c2daf1da6db30c4842a144daba8f66f2a1655072 --- /dev/null +++ b/modified/modeling_tf_utils.py @@ -0,0 +1,3472 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TF general model utils.""" + +from __future__ import annotations + +import functools +import gc +import inspect +import json +import os +import pickle +import re +import warnings +from collections.abc import Mapping +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union + +import h5py +import numpy as np +import tensorflow as tf +from huggingface_hub import Repository, list_repo_files +from keras import backend as K +from packaging.version import parse + +from . import DataCollatorWithPadding, DefaultDataCollator +from .activations_tf import get_tf_activation +from .configuration_utils import PretrainedConfig +from .dynamic_module_utils import custom_object_save +from .generation import GenerationConfig, TFGenerationMixin +from .tf_utils import ( + expand_1d, + load_attributes_from_hdf5_group, + save_attributes_to_hdf5_group, + shape_list, +) +from .utils import ( + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + TF2_WEIGHTS_INDEX_NAME, + TF2_WEIGHTS_NAME, + TF_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + ModelOutput, + PushToHubMixin, + cached_file, + download_url, + find_labels, + has_file, + is_offline_mode, + is_remote_url, + is_safetensors_available, + is_tf_symbolic_tensor, + logging, + requires_backends, + working_or_temp_dir, +) +from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files + + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.tensorflow import save_file as safe_save_file + +if TYPE_CHECKING: + from . 
import PreTrainedTokenizerBase
+
+
+ logger = logging.get_logger(__name__)
+ tf_logger = tf.get_logger()
+
+ TFModelInputType = Union[
+ List[tf.Tensor],
+ List[np.ndarray],
+ Dict[str, tf.Tensor],
+ Dict[str, np.ndarray],
+ tf.Tensor,
+ np.ndarray,
+ ]
+
+
+ def dummy_loss(y_true, y_pred):
+ if y_pred.shape.rank <= 1:
+ return y_pred
+ else:
+ reduction_axes = list(range(1, y_pred.shape.rank))
+ return tf.reduce_mean(y_pred, axis=reduction_axes)
+
+
+ class TFModelUtilsMixin:
+ """
+ A few utilities for `tf.keras.Model`, to be used as a mixin.
+ """
+
+ def num_parameters(self, only_trainable: bool = False) -> int:
+ """
+ Get the number of (optionally, trainable) parameters in the model.
+
+ Args:
+ only_trainable (`bool`, *optional*, defaults to `False`):
+ Whether or not to return only the number of trainable parameters.
+
+ Returns:
+ `int`: The number of parameters.
+ """
+ if only_trainable:
+ return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
+ else:
+ return self.count_params()
+
+
+ def keras_serializable(cls):
+ """
+ Decorate a Keras Layer class to support Keras serialization.
+
+ This is done by:
+
+ 1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at
+ serialization time).
+ 2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and
+ convert it to a config object for the actual layer initializer.
+ 3. Registering the class as a custom object in Keras (if the TensorFlow version supports this), so that it does not
+ need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`.
+
+ Args:
+ cls (a `tf.keras.layers.Layer` subclass):
+ Typically a `TF.MainLayer` class in this project; in general, it must accept a `config` argument to its
+ initializer.
+
+ Returns:
+ The same class object, with modifications for Keras deserialization.
+ """
+ initializer = cls.__init__
+
+ config_class = getattr(cls, "config_class", None)
+ if config_class is None:
+ raise AttributeError("Must set `config_class` to use @keras_serializable")
+
+ @functools.wraps(initializer)
+ def wrapped_init(self, *args, **kwargs):
+ config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
+
+ if isinstance(config, dict):
+ config = config_class.from_dict(config)
+ initializer(self, config, *args, **kwargs)
+ elif isinstance(config, PretrainedConfig):
+ if len(args) > 0:
+ initializer(self, *args, **kwargs)
+ else:
+ initializer(self, config, *args, **kwargs)
+ else:
+ raise ValueError("Must pass either a `PretrainedConfig` object or a dict as `config`")
+
+ self._config = config
+ self._kwargs = kwargs
+
+ cls.__init__ = wrapped_init
+
+ if not hasattr(cls, "get_config"):
+ raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
+ if hasattr(cls.get_config, "_is_default"):
+
+ def get_config(self):
+ cfg = super(cls, self).get_config()
+ cfg["config"] = self._config.to_dict()
+ cfg.update(self._kwargs)
+ return cfg
+
+ cls.get_config = get_config
+
+ cls._keras_serializable = True
+ if hasattr(tf.keras.utils, "register_keras_serializable"):
+ cls = tf.keras.utils.register_keras_serializable()(cls)
+ return cls
+
+
+ class TFCausalLanguageModelingLoss:
+ """
+ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.
+
+
+
+ Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
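+
+ A standalone sketch of the `-100` masking used in the non-legacy path below (tensor values are made up):
+
+ ```py
+ >>> import tensorflow as tf
+ >>> labels = tf.constant([[5, -100, 7]])
+ >>> loss_mask = tf.cast(labels != -100, tf.float32)
+ >>> loss_mask.numpy()  # masked positions contribute zero to the reduced loss
+ array([[1., 0., 1.]], dtype=float32)
+ ```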
+ + + """ + + def hf_compute_loss(self, labels, logits): + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + if self.config.tf_legacy_loss: + # make sure only labels that are not equal to -100 affect the loss + active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) + reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) + labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) + return loss_fn(labels, reduced_logits) + + # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway + unmasked_loss = loss_fn(tf.nn.relu(labels), logits) + # make sure only labels that are not equal to -100 affect the loss + loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype) + masked_loss = unmasked_loss * loss_mask + reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) + return tf.reshape(reduced_masked_loss, (1,)) + + +class TFQuestionAnsweringLoss: + """ + Loss function suitable for question answering. + """ + + def hf_compute_loss(self, labels, logits): + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + start_loss = loss_fn(labels["start_position"], logits[0]) + end_loss = loss_fn(labels["end_position"], logits[1]) + + return (start_loss + end_loss) / 2.0 + + +class TFTokenClassificationLoss: + """ + Loss function suitable for token classification. + + + + Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. + + + """ + + def hf_compute_loss(self, labels, logits): + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + if tf.executing_eagerly(): # Data-dependent conditionals are forbidden in XLA + if tf.math.reduce_any(labels == -1): + tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") + + if self.config.tf_legacy_loss: + # make sure only labels that are not equal to -100 + # are taken into account as loss + if tf.math.reduce_any(labels == -1): + tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") + active_loss = tf.reshape(labels, (-1,)) != -1 + else: + active_loss = tf.reshape(labels, (-1,)) != -100 + reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) + labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) + + return loss_fn(labels, reduced_logits) + + # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway + unmasked_loss = loss_fn(tf.nn.relu(labels), logits) + # make sure only labels that are not equal to -100 or -1 + # are taken into account as loss + loss_mask = tf.cast(labels >= 0, dtype=unmasked_loss.dtype) + # Avoid possible division by zero later + # Masked positions will have a loss of NaN because -100 and -1 are not valid labels + masked_loss = unmasked_loss * loss_mask + reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) + return tf.reshape(reduced_masked_loss, (1,)) + + +class TFSequenceClassificationLoss: + """ + Loss function suitable for sequence classification. 
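+
+ A minimal standalone sketch of the dispatch below (regression when logits have a single column, otherwise sparse cross-entropy; values are made up):
+
+ ```py
+ >>> import tensorflow as tf
+ >>> logits = tf.constant([[2.0, 0.5], [0.1, 1.5]])  # rank 2 with 2 classes -> cross-entropy branch
+ >>> labels = tf.constant([0, 1])
+ >>> loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
+ ...     from_logits=True, reduction=tf.keras.losses.Reduction.NONE
+ ... )
+ >>> loss_fn(labels, logits).shape  # one unreduced loss value per sample
+ TensorShape([2])
+ ```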
+ """ + + def hf_compute_loss(self, labels, logits): + if logits.shape.rank == 1 or logits.shape[1] == 1: + loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) + if labels.shape.rank == 1: + # MeanSquaredError returns a scalar loss if the labels are 1D, so avoid that + labels = tf.expand_dims(labels, axis=-1) + else: + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + + return loss_fn(labels, logits) + + +class TFMultipleChoiceLoss: + """Loss function suitable for multiple choice tasks.""" + + def hf_compute_loss(self, labels, logits): + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + return loss_fn(labels, logits) + + +class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): + """ + Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. + + + + Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. + + + """ + + +class TFNextSentencePredictionLoss: + """ + Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. + + + + Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. + + + """ + + def hf_compute_loss(self, labels, logits): + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + if self.config.tf_legacy_loss: + # make sure only labels that are not equal to -100 + # are taken into account as loss + next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) + next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) + next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) + + return loss_fn(next_sentence_label, next_sentence_reduced_logits) + + # make sure only labels that are not equal to -100 + # are taken into account as loss + + # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway + unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits) + ns_loss_mask = tf.cast(labels != -100, dtype=unmasked_ns_loss.dtype) + # Just zero out samples where label is -100, no reduction + masked_ns_loss = unmasked_ns_loss * ns_loss_mask + + return masked_ns_loss + + +def booleans_processing(config, **kwargs): + """ + Process the input booleans of each model. + + Args: + config ([`PretrainedConfig`]): + The config of the running model. + **kwargs: + The boolean parameters + + Returns: + A dictionary with the proper values for each boolean + """ + final_booleans = {} + + # Pure conv models (such as ConvNext) do not have `output_attentions`. 
If the signature has
+ # `output_attentions`, it will be present here in `kwargs`, even if unset (in that case, as `None`)
+ if "output_attentions" in kwargs:
+ final_booleans["output_attentions"] = (
+ kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
+ )
+ final_booleans["output_hidden_states"] = (
+ kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states
+ )
+ final_booleans["return_dict"] = kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
+
+ if "use_cache" in kwargs:
+ final_booleans["use_cache"] = (
+ kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
+ )
+ return final_booleans
+
+
+ def unpack_inputs(func):
+ """
+ Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables
+ downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input
+ (common case in Keras).
+
+ Args:
+ func (`callable`):
+ The callable function of the TensorFlow model.
+
+
+ Returns:
+ A callable that wraps the original `func` with the behavior described above.
+ """
+
+ original_signature = inspect.signature(func)
+
+ @functools.wraps(func)
+ def run_call_with_unpacked_inputs(self, *args, **kwargs):
+ # isolates the actual `**kwargs` for the decorated function
+ kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)}
+ fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call}
+ fn_args_and_kwargs.update({"kwargs_call": kwargs_call})
+
+ # move any positional args into kwargs, if they exist
+ fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args)))
+
+ # Encoder Decoder models delegate the application of the configuration options to their inner models.
+ if "EncoderDecoder" in self.__class__.__name__:
+ config = None
+ else:
+ config = self.config
+
+ unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs)
+ return func(self, **unpacked_inputs)
+
+ # Keras enforces the first layer argument to be passed, and checks it through `inspect.getfullargspec()`. This
+ # function does not follow wrapper chains (i.e. ignores `functools.wraps()`), meaning that without the line below
+ # Keras would attempt to check the first argument against the literal signature of the wrapper.
+ run_call_with_unpacked_inputs.__signature__ = original_signature
+
+ return run_call_with_unpacked_inputs
+
+
+ def input_processing(func, config, **kwargs):
+ """
+ Process the input of each TensorFlow model, including the booleans. In case of a list of symbolic inputs, each input
+ has to be named according to its parameter name, e.g. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
+ name="input_ids")`, otherwise the order of the tensors is not guaranteed during training.
+
+ Args:
+ func (`callable`):
+ The callable function of the TensorFlow model.
+ config ([`PretrainedConfig`]):
+ The config of the running model.
+ **kwargs:
+ The inputs of the model.
+
+ Returns:
+ A dictionary mapping each expected parameter name to its processed value.
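+
+ A standalone sketch of one normalization step this function performs (int64 inputs are downcast to int32; the tensor is illustrative):
+
+ ```py
+ >>> import tensorflow as tf
+ >>> val = tf.constant([[1, 2]], dtype=tf.int64)
+ >>> tf.cast(val, tf.int32).dtype
+ tf.int32
+ ```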
+ """ + signature = dict(inspect.signature(func).parameters) + has_kwargs = bool(signature.pop("kwargs", None)) + signature.pop("self", None) + parameter_names = list(signature.keys()) + main_input_name = parameter_names[0] + main_input = kwargs.pop(main_input_name, None) + output = {} + allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray) + + if "inputs" in kwargs["kwargs_call"]: + warnings.warn( + "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.", + FutureWarning, + ) + + output["input_ids"] = kwargs["kwargs_call"].pop("inputs") + + if "decoder_cached_states" in kwargs["kwargs_call"]: + warnings.warn( + "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use" + " `past_key_values` instead.", + FutureWarning, + ) + output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states") + + if "past" in kwargs["kwargs_call"] and "past_key_values" in parameter_names: + warnings.warn( + "The `past` argument is deprecated and will be removed in a future version, use `past_key_values`" + " instead.", + FutureWarning, + ) + kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past") + elif "past_key_values" in kwargs["kwargs_call"] and "past" in parameter_names: + kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values") + + if has_kwargs: + output["kwargs"] = kwargs.pop("kwargs_call", {}) + else: + if len(kwargs["kwargs_call"]) > 0: + raise ValueError( + "The following keyword arguments are not supported by this model:" + f" {list(kwargs['kwargs_call'].keys())}." + ) + kwargs.pop("kwargs_call") + + for k, v in kwargs.items(): + if isinstance(v, allowed_types) or tf.is_tensor(v) or v is None: + output[k] = v + else: + raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") + + if isinstance(main_input, (tuple, list)): + for i, input in enumerate(main_input): + # EagerTensors don't allow to use the .name property so we check for a real Tensor + if is_tf_symbolic_tensor(input): + # Tensor names have always the pattern `name:id` then we check only the + # `name` part + tensor_name = input.name.split(":")[0] + + if tensor_name in parameter_names: + output[tensor_name] = input + else: + output[parameter_names[i]] = input + elif isinstance(input, allowed_types) or input is None: + output[parameter_names[i]] = input + else: + raise ValueError( + f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for" + f" {parameter_names[i]}." + ) + elif isinstance(main_input, Mapping): + if "inputs" in main_input: + warnings.warn( + "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids`" + " instead.", + FutureWarning, + ) + + output["input_ids"] = main_input.pop("inputs") + + if "decoder_cached_states" in main_input: + warnings.warn( + "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use" + " `past_key_values` instead.", + FutureWarning, + ) + output["past_key_values"] = main_input.pop("decoder_cached_states") + + for k, v in dict(main_input).items(): + if isinstance(v, allowed_types) or v is None: + output[k] = v + elif k not in parameter_names and "args" not in parameter_names: + logger.warning( + f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored." 
+ continue
+ else:
+ raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} are accepted for {k}.")
+ else:
+ if tf.is_tensor(main_input) or main_input is None:
+ output[main_input_name] = main_input
+ else:
+ raise ValueError(
+ f"Data of type {type(main_input)} is not allowed; only {allowed_types} are accepted for"
+ f" {main_input_name}."
+ )
+
+ # Populate any unspecified argument with its default value, according to the signature.
+ for name in parameter_names:
+ if name not in list(output.keys()) and name != "args":
+ output[name] = kwargs.pop(name, signature[name].default)
+
+ # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
+ # So to respect the proper output we have to add this exception
+ if "args" in output:
+ if output["args"] is not None and is_tf_symbolic_tensor(output["args"]):
+ tensor_name = output["args"].name.split(":")[0]
+ output[tensor_name] = output["args"]
+ else:
+ # `args` in this case is always the first parameter, then `input_ids`
+ output["input_ids"] = output["args"]
+
+ del output["args"]
+
+ if "kwargs" in output:
+ del output["kwargs"]
+
+ cast_output = {}
+ for key, val in output.items():
+ if isinstance(val, tf.Tensor) and val.dtype == tf.int64:
+ cast_output[key] = tf.cast(val, tf.int32)
+ elif isinstance(val, np.ndarray) and val.dtype == np.int64:
+ cast_output[key] = val.astype(np.int32)
+ else:
+ cast_output[key] = val
+
+ output = cast_output
+ del cast_output
+
+ if config is not None:
+ boolean_dict = {
+ k: v
+ for k, v in output.items()
+ if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
+ }
+
+ output.update(
+ booleans_processing(
+ config=config,
+ **boolean_dict,
+ )
+ )
+
+ return output
+
+
+ def dtype_byte_size(dtype):
+ """
+ Returns the size (in bytes) occupied by one parameter of type `dtype`.
+
+ Example:
+
+ ```py
+ >>> dtype_byte_size(tf.float32)
+ 4
+ ```
+ """
+ if dtype == tf.bool:
+ return 1 / 8
+ bit_search = re.search(r"[^\d](\d+)$", dtype.name)
+ if bit_search is None:
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
+ bit_size = int(bit_search.groups()[0])
+ return bit_size // 8
+
+
+ def strip_model_name_and_prefix(name, _prefix=None):
+ if _prefix is not None and name.startswith(_prefix):
+ name = name[len(_prefix) :]
+ if name.startswith("/"):
+ name = name[1:]
+ if "model." not in name and len(name.split("/")) > 1:
+ name = "/".join(name.split("/")[1:])
+ return name
+
+
+ def tf_shard_checkpoint(weights, max_shard_size="10GB"):
+ """
+ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
+ given size.
+
+ The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
+ optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
+ limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
+ [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
+
+
+
+ If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint, which
+ will have a size greater than `max_shard_size`.
+
+
+
+ Args:
+ weights (`Dict[str, tf.ResourceVariable]`): The list of `tf.ResourceVariable`s of a model to save.
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
+ The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
+ (like `"5MB"`).
+ """
+ max_shard_size = convert_file_size_to_int(max_shard_size)
+
+ sharded_state_dicts = []
+ current_block = []
+ current_block_size = 0
+ total_size = 0
+
+ for item in weights:
+ weight_size = item.numpy().size * dtype_byte_size(item.dtype)
+
+ # If this weight is going to tip over the maximum size, we split.
+ if current_block_size + weight_size > max_shard_size:
+ sharded_state_dicts.append(current_block)
+ current_block = []
+ current_block_size = 0
+
+ current_block.append(item)
+ current_block_size += weight_size
+ total_size += weight_size
+
+ # Add the last block
+ sharded_state_dicts.append(current_block)
+
+ # If we only have one shard, we return it
+ if len(sharded_state_dicts) == 1:
+ return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
+
+ # Otherwise, let's build the index
+ weight_map = {}
+ shards = {}
+ for idx, shard in enumerate(sharded_state_dicts):
+ shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
+ shards[shard_file] = shard
+ for weight in shard:
+ weight_name = weight.name
+ weight_map[weight_name] = shard_file
+
+ # Add the metadata
+ metadata = {"total_size": total_size}
+ index = {"metadata": metadata, "weight_map": weight_map}
+ return shards, index
+
+
+ def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None):
+ """
+ This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load
+ the TF weights from the shard files according to their names and shapes.
+
+ This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
+ loaded in the model.
+
+ Args:
+ model (`tf.keras.models.Model`): The model in which to load the checkpoint.
+ shard_files (`List[str]`): A list containing the sharded checkpoint names.
+ ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
+ Whether or not to ignore weights whose sizes mismatch between the checkpoint and the model.
+ strict (`bool`, *optional*, defaults to `False`):
+ Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
+
+ Returns:
+ Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
+ mismatched layers.
+ """
+
+ # Load the index
+ unexpected_keys = set()
+ saved_keys = set()
+ mismatched_keys = set()
+
+ # Since TF adds the name of the class to its weights, and uses the index and not the name of the layer to load
+ # the weight, we have to get rid of the first prefix of the name of the layer.
+ model_keys = set()
+ model_layer_map = {}
+ for i, k in enumerate(model.weights):
+ layer_name = k.name
+ if _prefix is not None and layer_name.startswith(_prefix):
+ layer_name = layer_name[len(_prefix) :]
+ layer_name = layer_name.lstrip("/")
+ if not ("model." in layer_name or len(layer_name.split("/")) == 1):
+ layer_name = "/".join(layer_name.split("/")[1:])
+ model_keys.add(layer_name)
+ model_layer_map[layer_name] = i
+
+ for shard_file in shard_files:
+ saved_weight_names_set, unexpected_keys_set, mismatched_keys_set = load_tf_shard(
+ model,
+ model_layer_map,
+ shard_file,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ _prefix=_prefix,
+ )
+ saved_keys.update(saved_weight_names_set)
+ unexpected_keys.update(unexpected_keys_set)
+ mismatched_keys.update(mismatched_keys_set)
+ gc.collect()
+
+ missing_keys = model_keys - saved_keys
+ if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
+ error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}"
+ if len(missing_keys) > 0:
+ str_missing_keys = ",".join([f'"{k}"' for k in missing_keys])
+ error_message += f"\nMissing key(s): {str_missing_keys}."
+ if len(unexpected_keys) > 0:
+ str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys])
+ error_message += f"\nUnexpected key(s): {str_unexpected_keys}."
+ raise RuntimeError(error_message)
+
+ return missing_keys, unexpected_keys, mismatched_keys
+
+
+ def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
+ """
+ Loads a shard from a sharded checkpoint file. Handles the missing keys and unexpected keys.
+
+ Args:
+ model (`tf.keras.models.Model`): Model in which the weights are loaded
+ model_layer_map (`Dict`): A dictionary mapping the layer name to the index of the layer in the model.
+ resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded
+ ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys
+
+ Returns:
+ Three sets, one with the weight names that were found and successfully restored (from the shard file), one
+ with the unexpected weight names, and one with the mismatched weights.
+ """
+ saved_weight_names_set = set()
+ saved_weights = {}
+ mismatched_keys = set()
+ unexpected_keys = set()
+ # Read the H5 file
+ try:
+ with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file:
+ # Retrieve the name of each layer from the H5 file
+ saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names"))
+ weight_value_tuples = []
+
+ # Compute missing and unexpected sub layers
+ # Store the weights in a list of tuples that looks like [(weight_object, value_of_weight),...]
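+ # Shapes that differ are reshaped when compatible; otherwise they are recorded as mismatched
+ # (when `ignore_mismatched_sizes=True`) or re-raised, and everything collected here is applied
+ # in a single `K.batch_set_value` call below.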
+ for layer_name in saved_h5_model_layers_name:
+ h5_layer_object = sharded_checkpoint_file[layer_name]
+ saved_weights[layer_name] = np.asarray(h5_layer_object)
+
+ saved_weight_names_set.add(layer_name)
+
+ if layer_name not in model_layer_map:
+ unexpected_keys.add(layer_name)
+ else:
+ symbolic_weight = model.weights[model_layer_map[layer_name]]
+
+ saved_weight_value = saved_weights[layer_name]
+ # If the current weight is found
+ if saved_weight_value is not None:
+ # Check if the shape of the current weight and the one from the H5 file are different
+ if K.int_shape(symbolic_weight) != saved_weight_value.shape:
+ # If so, we reshape the weight from the H5 file to match the current weight
+ # If the two shapes are not compatible, we raise an error
+ try:
+ array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
+ except ValueError as e:
+ if ignore_mismatched_sizes:
+ mismatched_keys.add(
+ (layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
+ )
+ continue
+ else:
+ raise e
+ else:
+ array = saved_weight_value
+
+ # We create the tuple that will be loaded and add it to the final list
+ weight_value_tuples.append((symbolic_weight, array))
+
+ K.batch_set_value(weight_value_tuples)
+
+ return saved_weight_names_set, unexpected_keys, mismatched_keys
+
+ except Exception as e:
+ try:
+ with open(resolved_archive_file) as f:
+ if f.read().startswith("version"):
+ raise OSError(
+ "You seem to have cloned a repository without having git-lfs installed. Please install "
+ "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
+ "you cloned."
+ )
+ else:
+ raise ValueError(
+ f"Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained"
+ " model. Make sure you have saved the model properly."
+ ) from e
+ except (UnicodeDecodeError, ValueError):
+ raise OSError(
+ f"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' "
+ f"at '{resolved_archive_file}'. "
+ "If you tried to load a TF model from a sharded checkpoint, you should try converting the model "
+ "by loading it in PyTorch and saving it locally. A conversion script should be released soon."
+ )
+
+
+ def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
+ """
+ Detect missing and unexpected layers and load the TF weights from the checkpoint file according to their names
+ and shapes.
+
+ Args:
+ model (`tf.keras.models.Model`):
+ The model to load the weights into.
+ resolved_archive_file (`str`):
+ The location of the H5 file.
+ ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
+ Whether or not to ignore weights with shapes that don't match between the checkpoint and the model.
+
+ Returns:
+ Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
+ mismatched layers.
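+
+ A standalone sketch of the dispatch on the checkpoint's file extension performed below (the path is illustrative):
+
+ ```py
+ >>> path = "model.safetensors"
+ >>> "safetensors" if path.endswith(".safetensors") else "h5"
+ 'safetensors'
+ ```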
+ """ + if resolved_archive_file.endswith(".safetensors"): + load_function = load_tf_weights_from_safetensors + else: + load_function = load_tf_weights_from_h5 + + return load_function( + model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix + ) + + +def load_tf_weights_from_h5(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): + mismatched_layers = [] + + # Read the H5 file + with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file: + # Retrieve the name of each layer from the H5 file + saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names")) + + # Find the missing layers from the high level list of layers + missing_layers = list({layer.name for layer in model.layers} - saved_h5_model_layers_name) + + # Find the unexpected layers from the high level list of layers + unexpected_layers = list(saved_h5_model_layers_name - {layer.name for layer in model.layers}) + saved_weight_names_set = set() + symbolic_weights_names = set() + weight_value_tuples = [] + + # Compute missing and unexpected sub layers + # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] + for layer in model.layers: + # if layer_name from the H5 file belongs to the layers from the instantiated model + if layer.name in saved_h5_model_layers_name: + # Get the H5 layer object from its name + h5_layer_object = sharded_checkpoint_file[layer.name] + # Get all the weights as a list from the layer object + symbolic_weights = layer.trainable_weights + layer.non_trainable_weights + saved_weights = {} + + # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} + # And a set with only the names + for weight_name in load_attributes_from_hdf5_group(h5_layer_object, "weight_names"): + # TF names always start with the model name so we ignore it + name = "/".join(weight_name.split("/")[1:]) + + if _prefix is not None: + name = _prefix + "/" + name + + saved_weights[name] = np.asarray(h5_layer_object[weight_name]) + + # Add the updated name to the final list for computing missing/unexpected values + saved_weight_names_set.add(name) + + # Loop over each weights from the instantiated model and compare with the weights from the H5 file + for symbolic_weight in symbolic_weights: + # TF names always start with the model name so we ignore it + if _prefix is not None: + delimeter = len(_prefix.split("/")) + symbolic_weight_name = "/".join( + symbolic_weight.name.split("/")[:delimeter] + + symbolic_weight.name.split("/")[delimeter + 1 :] + ) + else: + symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:]) + + # here we check if the current weight is among the weights from the H5 file + # If yes, get the weight_value of the corresponding weight from the H5 file + # If not, make the value to None + saved_weight_value = saved_weights.get(symbolic_weight_name, None) + + # Retrocompatibility patch: some embeddings are stored with the weights name (e.g. 
Bart's + # `model.shared/embeddings:0` are stored as `model.shared/weights:0`) + if saved_weight_value is None and symbolic_weight_name.endswith("embeddings:0"): + symbolic_weight_name = symbolic_weight_name[:-12] + "weight:0" + saved_weight_value = saved_weights.get(symbolic_weight_name, None) + + # Add the updated name to the final list for computing missing/unexpected values + symbolic_weights_names.add(symbolic_weight_name) + + # If the current weight is found + if saved_weight_value is not None: + # Check if the shape of the current weight and the one from the H5 file are different + if K.int_shape(symbolic_weight) != saved_weight_value.shape: + # If yes we reshape the weight from the H5 file accordingly to the current weight + # If the two shapes are not compatible we raise an issue + try: + array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) + except ValueError as e: + if ignore_mismatched_sizes: + mismatched_layers.append( + (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) + ) + continue + else: + raise e + else: + array = saved_weight_value + + # We create the tuple that will be loaded and add it to the final list + weight_value_tuples.append((symbolic_weight, array)) + + # Load all the weights + K.batch_set_value(weight_value_tuples) + + # Compute the missing and unexpected layers + missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) + unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) + + return missing_layers, unexpected_layers, mismatched_layers + + +def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): + # Read the safetensors file + with safe_open(resolved_archive_file, framework="tf") as safetensors_archive: + mismatched_layers = [] + weight_names = [strip_model_name_and_prefix(w.name, _prefix=_prefix) for w in model.weights] + loaded_weight_names = list(safetensors_archive.keys()) + # Find the missing layers from the high level list of layers + missing_layers = list(set(weight_names) - set(loaded_weight_names)) + # Find the unexpected layers from the high level list of layers + unexpected_layers = list(set(loaded_weight_names) - set(weight_names)) + + for weight in model.weights: + weight_name = strip_model_name_and_prefix(weight.name, _prefix=_prefix) + if weight_name in loaded_weight_names: + weight_value = safetensors_archive.get_tensor(weight_name) + # Check if the shape of the current weight and the one from the H5 file are different + if K.int_shape(weight) != weight_value.shape: + # If yes we reshape the weight from the H5 file accordingly to the current weight + # If the two shapes are not compatible we raise an issue + try: + weight_value = tf.reshape(weight_value, K.int_shape(weight)) + except (ValueError, tf.errors.InvalidArgumentError) as e: + if ignore_mismatched_sizes: + mismatched_layers.append((weight_name, weight_value.shape, K.int_shape(weight))) + continue + else: + raise e + + K.set_value(weight, weight_value) # weight.assign() might break if weight is a DTensor + return missing_layers, unexpected_layers, mismatched_layers + + +def init_copy_embeddings(old_embeddings, new_num_tokens): + r""" + This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case + new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be + kept or not. 
Example:
+
+        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]
+
+            - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
+        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]
+
+            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
+    """
+    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
+    size_diff = new_num_tokens - old_num_tokens
+
+    # initialize new embeddings
+    # Copy token embeddings from the previous ones
+    if tf.math.greater(size_diff, 0):
+        # if the new size is greater than the old one, we pad the current embeddings up to the new size
+        # and create a mask to identify the padded values, which will be replaced by the values of the
+        # newly created embeddings
+        current_weights = tf.pad(
+            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
+        )
+        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
+        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
+        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
+    else:
+        # if the new size is lower than the old one, we take the current embeddings until the new size
+        current_weights = tf.slice(
+            old_embeddings.value(),
+            tf.convert_to_tensor([0, 0]),
+            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
+        )
+        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
+
+    return mask, current_weights
+
+
+class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
+    r"""
+    Base class for all TF models.
+
+    [`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
+    downloading and saving models as well as a few methods common to all models to:
+
+        - resize the input embeddings,
+        - prune heads in the self-attention layers.
+
+    Class attributes (overridden by derived classes):
+
+        - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
+          for this model architecture.
+        - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
+          classes of the same architecture adding modules on top of the base model.
+        - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
+          models, `pixel_values` for vision models and `input_values` for speech models).
+    """
+
+    config_class = None
+    base_model_prefix = ""
+    main_input_name = "input_ids"
+    _auto_class = None
+    _using_dummy_loss = None
+    _label_to_output_map = None
+
+    # a list of re patterns of tensor names to ignore from the model when loading the model weights
+    # (and avoid unnecessary warnings).
+    _keys_to_ignore_on_load_missing = None
+    # a list of re patterns of tensor names to ignore from the weights when loading the model weights
+    # (and avoid unnecessary warnings).
+    _keys_to_ignore_on_load_unexpected = None
+    _requires_load_weight_prefix = False
+
+    @property
+    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
+        """
+        Dummy inputs to build the network.
+
+        Returns:
+            `Dict[str, tf.Tensor]`: The dummy inputs.
+        """
+        dummies = {}
+        for key, spec in self.input_signature.items():
+            # 2 is the most correct arbitrary size.
I will not be taking questions + dummy_shape = [dim if dim is not None else 2 for dim in spec.shape] + if spec.shape[0] is None: + # But let's make the batch size 1 to save memory anyway + dummy_shape[0] = 1 + dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype) + if key == "token_type_ids": + # Some models have token_type_ids but with a vocab_size of 1 + dummies[key] = tf.zeros_like(dummies[key]) + if self.config.add_cross_attention and "encoder_hidden_states" in inspect.signature(self.call).parameters: + if "encoder_hidden_states" not in dummies: + if self.main_input_name == "input_ids": + dummies["encoder_hidden_states"] = tf.ones( + shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name="encoder_hidden_states" + ) + else: + raise NotImplementedError( + "Model has cross-attention but we couldn't infer the shape for the encoder hidden states. Please manually override dummy_inputs!" + ) + return dummies + + def build_in_name_scope(self): + with tf.name_scope(self.name): + self.build(input_shape=None) + + @property + def framework(self) -> str: + """ + :str: Identifies that this is a TensorFlow model. + """ + return "tf" + + def build(self, input_shape=None): + pass # This is just here to make sure we don't call the superclass build() + + def __init__(self, config, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + if not isinstance(config, PretrainedConfig): + raise ValueError( + f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " + "`PretrainedConfig`. To create a model from a pretrained model use " + f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + # Save config and origin of the pretrained weights if given in model + self.config = config + self.name_or_path = config.name_or_path + self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None + self._set_save_spec(self.input_signature) + + def get_config(self): + return self.config.to_dict() + + @classmethod + def from_config(cls, config, **kwargs): + if isinstance(config, PretrainedConfig): + return cls._from_config(config, **kwargs) + return cls._from_config(cls.config_class.from_dict(config, **kwargs)) + + @classmethod + def _from_config(cls, config, **kwargs): + """ + All context managers that the model should be initialized under go here. + """ + return cls(config, **kwargs) + + def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor: + """ + Prepare the head mask if needed. + + Args: + head_mask (`tf.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): + The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). + num_hidden_layers (`int`): + The number of hidden layers in the model. + + Returns: + `tf.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with + `[None]` for each layer. 
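+
+        Example (a minimal sketch; the 12-layer / 12-head sizes are assumptions for illustration):
+
+        ```python
+        head_mask = tf.constant([0.0] + [1.0] * 11)  # drop head 0, keep the remaining 11 heads
+        head_mask = model.get_head_mask(head_mask, num_hidden_layers=12)  # broadcast to 5D
+        ```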
+ """ + if head_mask is not None: + head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) + else: + head_mask = [None] * num_hidden_layers + + return head_mask + + def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): + """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" + if head_mask.shape.rank == 1: + head_mask = head_mask[None, None, :, None, None] + head_mask = tf.repeat(head_mask, repeats=num_hidden_layers, axis=0) + elif head_mask.shape.rank == 2: + head_mask = head_mask[:, None, :, None, None] + assert head_mask.shape.rank == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" + head_mask = tf.cast(head_mask, tf.float32) # switch to float if need + fp16 compatibility + return head_mask + + @tf.function + def serving(self, inputs): + """ + Args: + Method used for serving the model. Does not have a specific signature, but will be specialized as concrete + functions when saving with `save_pretrained`. + inputs (`Dict[str, tf.Tensor]`): + The input of the saved model as a dictionary of tensors. + """ + output = self.call(inputs) + + return self.serving_output(output) + + def eager_serving(self, inputs): + """ + Method used for serving the model. This method is deprecated, and will be removed. + + Args: + inputs (`Dict[str, tf.Tensor]`): + The input of the saved model as a dictionary of tensors. + """ + warnings.warn( + "The function `eager_serving` is deprecated and will be removed in version 4.32.0 of Transformers", + FutureWarning, + ) + output = self.call(inputs) + + return self.serving_output(output) + + @property + def input_signature(self) -> Dict[str, tf.TensorSpec]: + """ + This property should return a dict mapping input names to tf.TensorSpec objects, representing the expected + shape and dtype for model inputs. It is used for both serving and for generating the dummy inputs used to build + the model. + """ + model_inputs = list(inspect.signature(self.call).parameters) + sig = {} + if "input_ids" in model_inputs: + if self.__class__.__name__.endswith("ForMultipleChoice"): + text_dims = 3 + else: + text_dims = 2 + for input_name in ( + "input_ids", + "attention_mask", + "token_type_ids", + "decoder_input_ids", + "decoder_attention_mask", + ): + if input_name in model_inputs: + sig[input_name] = tf.TensorSpec([None] * text_dims, tf.int32, name=input_name) + if "pixel_values" in model_inputs: + pixel_values_shape = [None, None, None, None] + if hasattr(self.config, "vision_config"): + vision_config = self.config.vision_config + else: + vision_config = self.config + if hasattr(vision_config, "num_channels"): + pixel_values_shape[1] = vision_config.num_channels + else: + raise NotImplementedError( + "Could not infer number of channels from config, please override input_signature to specify input shapes." + ) + if hasattr(vision_config, "image_size"): + pixel_values_shape[2] = pixel_values_shape[3] = vision_config.image_size + elif hasattr(vision_config, "input_size"): + pixel_values_shape[2] = pixel_values_shape[3] = vision_config.input_size + else: + raise NotImplementedError( + "Could not infer input image shape from config, please override input_signature to specify input shapes." + ) + sig["pixel_values"] = tf.TensorSpec(pixel_values_shape, tf.float32, name="pixel_values") + if "input_features" in model_inputs: + raise NotImplementedError("Audio models need a manually defined input_signature") + return sig + + def serving_output(self, output): + """ + Prepare the output of the saved model. 
Can be overridden if specific serving modifications are required.
+        """
+        if not isinstance(output, ModelOutput):
+            return output
+        for key in output:
+            if key.endswith("hidden_states") and not getattr(self.config, "output_hidden_states", False):
+                output[key] = None
+            elif key.endswith("attentions") and not getattr(self.config, "output_attentions", False):
+                output[key] = None
+            elif key == "past_key_values" and not getattr(self.config, "use_cache", False):
+                output[key] = None
+            elif key == "cross_attentions" and not (
+                getattr(self.config, "output_attentions", False) and getattr(self.config, "add_cross_attention", False)
+            ):
+                output[key] = None
+            if isinstance(output[key], (tuple, list)):
+                try:
+                    output[key] = tf.convert_to_tensor(output[key])
+                except (ValueError, tf.errors.InvalidArgumentError):
+                    pass  # Layers may not have the same dimensions
+        return output
+
+    @classmethod
+    def can_generate(cls) -> bool:
+        """
+        Returns whether this model can generate sequences with `.generate()`.
+
+        Returns:
+            `bool`: Whether this model can generate sequences with `.generate()`.
+        """
+        # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation.
+        # Alternatively, the model can also have a custom `generate` function.
+        if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate):
+            return False
+        return True
+
+    def get_input_embeddings(self) -> tf.keras.layers.Layer:
+        """
+        Returns the model's input embeddings layer.
+
+        Returns:
+            `tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
+        """
+        main_layer = getattr(self, self.base_model_prefix, self)
+
+        if main_layer is not self:
+            return main_layer.get_input_embeddings()
+        else:
+            raise NotImplementedError
+
+    def _save_checkpoint(self, checkpoint_dir, epoch):
+        if not os.path.isdir(checkpoint_dir):
+            os.mkdir(checkpoint_dir)
+        # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer
+        # state for us, because it requires special handling for objects like custom losses, which we use
+        # internally and which users are likely to use too
+        weights_path = os.path.join(checkpoint_dir, "weights.h5")
+        self.save_weights(weights_path)
+        extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()}
+        extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle")
+        with open(extra_data_path, "wb") as f:
+            pickle.dump(extra_data, f)
+
+    def load_repo_checkpoint(self, repo_path_or_name):
+        """
+        Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when
+        the checkpoint was made.
+
+        Args:
+            repo_path_or_name (`str`):
+                Can either be a repository name for your model on the Hub or a path to a local folder (in which case
+                the repository will have the name of that local folder).
+
+        Returns:
+            `dict`: A dictionary of extra metadata from the checkpoint, most commonly an "epoch" count.
+        """
+        if getattr(self, "optimizer", None) is None:
+            raise RuntimeError(
+                "Checkpoint loading failed as no optimizer is attached to the model. "
+                "This is most likely caused by the model not being compiled."
+            )
+        if os.path.isdir(repo_path_or_name):
+            local_dir = repo_path_or_name
+        else:
+            # If this isn't a local path, check that the remote repo exists and has a checkpoint in it
+            repo_files = list_repo_files(repo_path_or_name)
+            for file in ("checkpoint/weights.h5", "checkpoint/extra_data.pickle"):
+                if file not in repo_files:
+                    raise FileNotFoundError(f"Repo {repo_path_or_name} does not contain checkpoint file {file}!")
+            repo = Repository(repo_path_or_name.split("/")[-1], clone_from=repo_path_or_name)
+            local_dir = repo.local_dir
+
+        # Now make sure the repo actually has a checkpoint in it.
+        checkpoint_dir = os.path.join(local_dir, "checkpoint")
+        weights_file = os.path.join(checkpoint_dir, "weights.h5")
+        if not os.path.isfile(weights_file):
+            raise FileNotFoundError(f"Could not find checkpoint file weights.h5 in repo {repo_path_or_name}!")
+        extra_data_file = os.path.join(checkpoint_dir, "extra_data.pickle")
+        if not os.path.isfile(extra_data_file):
+            raise FileNotFoundError(f"Could not find checkpoint file extra_data.pickle in repo {repo_path_or_name}!")
+
+        # Assuming the repo is real and we got a checkpoint, load the weights and the optimizer state into the model.
+        # The optimizer state includes the iteration count, so learning rate schedules should resume as normal too.
+        self.load_weights(weights_file)
+        with open(extra_data_file, "rb") as f:
+            extra_data = pickle.load(f)
+        self.optimizer.set_weights(extra_data["optimizer_state"])
+
+        # Finally, return the epoch number from the checkpoint. This isn't a property of the model, so we can't
+        # set it directly, but the user can pass it to fit().
+        return {"epoch": extra_data["epoch"]}
+
+    def prepare_tf_dataset(
+        self,
+        dataset: "datasets.Dataset",  # noqa:F821
+        batch_size: int = 8,
+        shuffle: bool = True,
+        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
+        collate_fn: Optional[Callable] = None,
+        collate_fn_args: Optional[Dict[str, Any]] = None,
+        drop_remainder: Optional[bool] = None,
+        prefetch: bool = True,
+    ):
+        """
+        Wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching. This method is
+        designed to create a "ready-to-use" dataset that can be passed directly to Keras methods like `fit()` without
+        further modification. The method will drop columns from the dataset if they don't match input names for the
+        model. If you want to specify the column names to return rather than using the names that match this model, we
+        recommend using `Dataset.to_tf_dataset()` instead.
+
+        Args:
+            dataset (`Any`):
+                A [`~datasets.Dataset`] to be wrapped as a `tf.data.Dataset`.
+            batch_size (`int`, defaults to 8):
+                The size of batches to return.
+            shuffle (`bool`, defaults to `True`):
+                Whether to return samples from the dataset in random order. Usually `True` for training datasets and
+                `False` for validation/test datasets.
+            tokenizer ([`PreTrainedTokenizerBase`], *optional*):
+                A `PreTrainedTokenizer` that will be used to pad samples to create batches. Has no effect if a specific
+                `collate_fn` is passed instead.
+            collate_fn (`Callable`, *optional*):
+                A function that collates samples from the dataset into a single batch. Defaults to
+                `DefaultDataCollator` if no `tokenizer` is supplied or `DataCollatorWithPadding` if a `tokenizer` is
+                passed.
+            collate_fn_args (`Dict[str, Any]`, *optional*):
+                A dict of arguments to pass to the `collate_fn` alongside the list of samples.
+ drop_remainder (`bool`, *optional*): + Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults + to the same setting as `shuffle`. + prefetch (`bool`, defaults to `True`): + Whether to add prefetching to the end of the `tf.data` pipeline. This is almost always beneficial for + performance, but can be disabled in edge cases. + + + Returns: + `Dataset`: A `tf.data.Dataset` which is ready to pass to the Keras API. + """ + requires_backends(self, ["datasets"]) + import datasets + + if collate_fn is None: + if tokenizer is None: + collate_fn = DefaultDataCollator(return_tensors="np") + else: + collate_fn = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="np") + if collate_fn_args is None: + collate_fn_args = {} + + if not isinstance(dataset, datasets.Dataset): + raise TypeError("Dataset argument should be a datasets.Dataset!") + model_inputs = list(inspect.signature(self.call).parameters) + model_labels = find_labels(self.__class__) + if "cols_to_retain" in list(inspect.signature(dataset._get_output_signature).parameters.keys()): + output_signature, _ = dataset._get_output_signature( + dataset, + batch_size=None, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + cols_to_retain=model_inputs, + ) + else: + # TODO Matt: This is a workaround for older versions of datasets that are missing the `cols_to_retain` + # argument. We should remove this once the minimum supported version of datasets is > 2.3.2 + unwanted_columns = [ + feature + for feature in dataset.features + if feature not in model_inputs and feature not in ("label_ids", "label") + ] + dataset = dataset.remove_columns(unwanted_columns) + output_signature, _ = dataset._get_output_signature( + dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args + ) + output_columns = list(output_signature.keys()) + feature_cols = [col for col in output_columns if col in model_inputs and col not in model_labels] + label_cols = [col for col in output_columns if col in model_labels] + + # Backwards compatibility for older versions of datasets. Previously, if `columns` or `label_cols` + # were a single element list, the returned element spec would be a single element. Now, passing [feature] + # will return a dict structure {"feature": feature}, and passing a single string will return a single element. + feature_cols = feature_cols[0] if len(feature_cols) == 1 else feature_cols + label_cols = label_cols[0] if len(label_cols) == 1 else label_cols + + if drop_remainder is None: + drop_remainder = shuffle + tf_dataset = dataset.to_tf_dataset( + columns=feature_cols, + label_cols=label_cols, + batch_size=batch_size, + shuffle=shuffle, + drop_remainder=drop_remainder, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + prefetch=prefetch, + ) + return tf_dataset + + def compile( + self, + optimizer="rmsprop", + loss="auto_with_warning", + metrics=None, + loss_weights=None, + weighted_metrics=None, + run_eagerly=None, + steps_per_execution=None, + **kwargs, + ): + """ + This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss + function themselves. + """ + if loss in ("auto_with_warning", "passthrough"): # "passthrough" for workflow backward compatibility + logger.info( + "No loss specified in compile() - the model's internal loss computation will be used as the " + "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! 
" + "To disable this behaviour please pass a loss argument, or explicitly pass " + "`loss=None` if you do not want your model to compute a loss. You can also specify `loss='auto'` to " + "get the internal loss without printing this info string." + ) + loss = "auto" + if loss == "auto": + loss = dummy_loss + self._using_dummy_loss = True + else: + self._using_dummy_loss = False + parent_args = list(inspect.signature(tf.keras.Model.compile).parameters.keys()) + # This argument got renamed, we need to support both versions + if "steps_per_execution" in parent_args: + super().compile( + optimizer=optimizer, + loss=loss, + metrics=metrics, + loss_weights=loss_weights, + weighted_metrics=weighted_metrics, + run_eagerly=run_eagerly, + steps_per_execution=steps_per_execution, + **kwargs, + ) + else: + super().compile( + optimizer=optimizer, + loss=loss, + metrics=metrics, + loss_weights=loss_weights, + weighted_metrics=weighted_metrics, + run_eagerly=run_eagerly, + experimental_steps_per_execution=steps_per_execution, + **kwargs, + ) + + def compute_loss(self, *args, **kwargs): + if hasattr(tf.keras.Model, "compute_loss"): + # This will be true in TF 2.8 or greater + return super().compute_loss(*args, **kwargs) + else: + warnings.warn( + "The old compute_loss method is deprecated as it conflicts with the Keras compute_loss " + "method added in TF 2.8. If you want the original HF compute_loss, please call " + "hf_compute_loss() instead. From TF versions >= 2.8, or Transformers versions >= 5, " + "calling compute_loss() will get the Keras method instead.", + FutureWarning, + ) + return self.hf_compute_loss(*args, **kwargs) + + def get_label_to_output_name_mapping(self): + arg_names = list(inspect.signature(self.call).parameters) + if self._label_to_output_map is not None: + return self._label_to_output_map + elif "start_positions" in arg_names: + return {"start_positions": "start_logits", "end_positions": "end_logits"} + elif "sentence_order_label" in arg_names: + return {"labels": "prediction_logits", "sentence_order_label": "sop_logits"} + elif "next_sentence_label" in arg_names: + return {"labels": "prediction_logits", "next_sentence_label": "seq_relationship_logits"} + elif "mc_labels" in arg_names: + return {"labels": "logits", "mc_labels": "mc_logits"} + else: + return {} + + def train_step(self, data): + """ + A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models + and supports directly training on the loss output head. In addition, it ensures input keys are copied to the + labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure + that they are available to the model during the forward pass. + """ + + # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map` + arg_names = list(inspect.signature(self.call).parameters) + label_kwargs = find_labels(self.__class__) + label_to_output = self.get_label_to_output_name_mapping() + output_to_label = {val: key for key, val in label_to_output.items()} + if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"): + # Newer TF train steps leave this out + data = expand_1d(data) + x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) + # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify + # them during input/label pre-processing. This avoids surprising the user by wrecking their data. 
+ # In addition, modifying mutable Python inputs makes XLA compilation impossible. + if isinstance(x, dict): + x = x.copy() + if isinstance(y, dict): + y = y.copy() + + # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments, + # if those keys are not already present in the input dict + if self._using_dummy_loss and y is not None: + # If y is a tensor and the model only has one label-like input, map y to that input + if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): + if isinstance(x, tf.Tensor): + x = {arg_names[0]: x} + label_kwarg = next(iter(label_kwargs)) + if label_kwarg not in x: + x[label_kwarg] = y + # Otherwise, copy keys from y to x as long as they weren't already present in x + elif isinstance(y, dict): + if isinstance(x, tf.Tensor): + x = {arg_names[0]: x} + for key, val in y.items(): + if key in arg_names and key not in x: + x[key] = val + elif output_to_label.get(key, None) in arg_names and key not in x: + x[output_to_label[key]] = val + if y is None: + y = {key: val for key, val in x.items() if key in label_kwargs} + if not y and not self._using_dummy_loss: + raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!") + + if isinstance(y, dict): + # Rename labels at this point to match output heads + y = {label_to_output.get(key, key): val for key, val in y.items()} + + # Run forward pass. + with tf.GradientTape() as tape: + if self._using_dummy_loss and "return_loss" in arg_names: + y_pred = self(x, training=True, return_loss=True) + else: + y_pred = self(x, training=True) + if self._using_dummy_loss: + loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) + else: + loss = None + + # This next block matches outputs to label keys. Tensorflow's standard method for doing this + # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors) + if isinstance(y, dict) and len(y) == 1: + if list(y.keys())[0] in y_pred.keys(): + y_pred = y_pred[list(y.keys())[0]] + elif list(y_pred.keys())[0] == "loss": + y_pred = y_pred[1] + else: + y_pred = y_pred[0] + _, y = y.popitem() + elif isinstance(y, dict): + # If the labels are a dict, match keys from the output by name + y_pred = {key: val for key, val in y_pred.items() if key in y} + elif isinstance(y, tuple) or isinstance(y, list): + # If the labels are a tuple/list, match keys to the output by order, skipping the loss. + if list(y_pred.keys())[0] == "loss": + y_pred = y_pred.to_tuple()[1:] + else: + y_pred = y_pred.to_tuple() + y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems + else: + # If the labels are a single tensor, match them to the first non-loss tensor in the output + if list(y_pred.keys())[0] == "loss": + y_pred = y_pred[1] + else: + y_pred = y_pred[0] + + if loss is None: + loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) + + # Run backwards pass. 
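+        # `optimizer.minimize` computes the gradients of `loss` with respect to `trainable_variables`
+        # from the tape recorded above and applies them in a single call, standing in for an explicit
+        # `tape.gradient(...)` / `optimizer.apply_gradients(...)` pair.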
+        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
+
+        self.compiled_metrics.update_state(y, y_pred, sample_weight)
+        # Collect metrics to return
+        return_metrics = {}
+        for metric in self.metrics:
+            result = metric.result()
+            if isinstance(result, dict):
+                return_metrics.update(result)
+            else:
+                return_metrics[metric.name] = result
+        return return_metrics
+
+    def test_step(self, data):
+        """
+        A modification of Keras's default `test_step` that correctly handles matching outputs to labels for our models
+        and supports directly evaluating on the loss output head. In addition, it ensures input keys are copied to the
+        labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure
+        that they are available to the model during the forward pass.
+        """
+        # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map`
+        arg_names = list(inspect.signature(self.call).parameters)
+        label_kwargs = find_labels(self.__class__)
+        label_to_output = self.get_label_to_output_name_mapping()
+        output_to_label = {val: key for key, val in label_to_output.items()}
+        if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"):
+            # Newer versions leave this out
+            data = expand_1d(data)
+        x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
+        # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify
+        # them during input/label pre-processing. This avoids surprising the user by wrecking their data.
+        # In addition, modifying mutable Python inputs makes XLA compilation impossible.
+        if isinstance(x, dict):
+            x = x.copy()
+        if isinstance(y, dict):
+            y = y.copy()
+
+        # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments,
+        # if those keys are not already present in the input dict
+        if self._using_dummy_loss and y is not None:
+            # If y is a tensor and the model only has one label-like input, map y to that input
+            if len(label_kwargs) == 1 and isinstance(y, tf.Tensor):
+                if isinstance(x, tf.Tensor):
+                    x = {arg_names[0]: x}
+                label_kwarg = next(iter(label_kwargs))
+                if label_kwarg not in x:
+                    x[label_kwarg] = y
+            # Otherwise, copy keys from y to x as long as they weren't already present in x
+            elif isinstance(y, dict):
+                if isinstance(x, tf.Tensor):
+                    x = {arg_names[0]: x}
+                for key, val in y.items():
+                    if key in arg_names and key not in x:
+                        x[key] = val
+                    elif output_to_label.get(key, None) in arg_names and key not in x:
+                        x[output_to_label[key]] = val
+        if y is None:
+            y = {key: val for key, val in x.items() if key in label_kwargs}
+            if not y and not self._using_dummy_loss:
+                raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!")
+
+        if isinstance(y, dict):
+            # Rename labels at this point to match output heads
+            y = {label_to_output.get(key, key): val for key, val in y.items()}
+
+        # Run forward pass.
+        if self._using_dummy_loss and "return_loss" in arg_names:
+            y_pred = self(x, return_loss=True, training=False)
+        else:
+            y_pred = self(x, training=False)
+        if self._using_dummy_loss:
+            loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses)
+        else:
+            loss = None
+
+        # This next block matches outputs to label keys. Tensorflow's standard method for doing this
+        # can get very confused if any of the keys contain nested values (e.g.
lists/tuples of Tensors) + if isinstance(y, dict) and len(y) == 1: + if list(y.keys())[0] in y_pred.keys(): + y_pred = y_pred[list(y.keys())[0]] + elif list(y_pred.keys())[0] == "loss": + y_pred = y_pred[1] + else: + y_pred = y_pred[0] + _, y = y.popitem() + elif isinstance(y, dict): + # If the labels are a dict, match keys from the output by name + y_pred = {key: val for key, val in y_pred.items() if key in y} + elif isinstance(y, tuple) or isinstance(y, list): + # If the labels are a tuple/list, match keys to the output by order, skipping the loss. + if list(y_pred.keys())[0] == "loss": + y_pred = y_pred.to_tuple()[1:] + else: + y_pred = y_pred.to_tuple() + y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems + else: + # If the labels are a single tensor, match them to the first non-loss tensor in the output + if list(y_pred.keys())[0] == "loss": + y_pred = y_pred[1] + else: + y_pred = y_pred[0] + + if loss is None: + loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) + + self.compiled_metrics.update_state(y, y_pred, sample_weight) + # Collect metrics to return + return_metrics = {} + for metric in self.metrics: + result = metric.result() + if isinstance(result, dict): + return_metrics.update(result) + else: + return_metrics[metric.name] = result + return return_metrics + + def create_model_card( + self, + output_dir, + model_name: str, + language: Optional[str] = None, + license: Optional[str] = None, + tags: Optional[str] = None, + finetuned_from: Optional[str] = None, + tasks: Optional[str] = None, + dataset_tags: Optional[Union[str, List[str]]] = None, + dataset: Optional[Union[str, List[str]]] = None, + dataset_args: Optional[Union[str, List[str]]] = None, + ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + + Args: + output_dir (`str` or `os.PathLike`): + The folder in which to create the model card. + model_name (`str`, *optional*): + The name of the model. + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ + # Avoids a circular import by doing this when necessary. 
+        from .modelcard import TrainingSummary  # tests_ignore
+
+        training_summary = TrainingSummary.from_keras(
+            self,
+            keras_history=self.history,
+            language=language,
+            license=license,
+            tags=tags,
+            model_name=model_name,
+            finetuned_from=finetuned_from,
+            tasks=tasks,
+            dataset_tags=dataset_tags,
+            dataset=dataset,
+            dataset_args=dataset_args,
+        )
+        model_card = training_summary.to_model_card()
+        with open(os.path.join(output_dir, "README.md"), "w") as f:
+            f.write(model_card)
+
+    def set_input_embeddings(self, value):
+        """
+        Set model's input embeddings
+
+        Args:
+            value (`tf.Variable`):
+                The new weights mapping vocabulary to hidden states.
+        """
+        main_layer = getattr(self, self.base_model_prefix)
+
+        if main_layer is None:
+            raise NotImplementedError("The model does not implement the `base_model_prefix` attribute.")
+
+        try:
+            main_layer.set_input_embeddings(value)
+        except AttributeError:
+            logger.info("Building the model")
+            self.build_in_name_scope()
+            main_layer.set_input_embeddings(value)
+
+    def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
+        """
+        Returns the model's output embeddings
+
+        Returns:
+            `tf.Variable`: The weights mapping hidden states to vocabulary.
+        """
+        if self.get_lm_head() is not None:
+            lm_head = self.get_lm_head()
+
+            try:
+                return lm_head.get_output_embeddings()
+            except AttributeError:
+                logger.info("Building the model")
+                self.build_in_name_scope()
+
+                return lm_head.get_output_embeddings()
+
+        return None  # Overwrite for models with output embeddings
+
+    def set_output_embeddings(self, value):
+        """
+        Set model's output embeddings
+
+        Args:
+            value (`tf.Variable`):
+                The new weights mapping hidden states to vocabulary.
+        """
+        if self.get_lm_head() is not None:
+            lm_head = self.get_lm_head()
+            try:
+                lm_head.set_output_embeddings(value)
+            except AttributeError:
+                logger.info("Building the model")
+                self.build_in_name_scope()
+                lm_head.set_output_embeddings(value)
+
+    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
+        """
+        Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
+        embeddings
+
+        Return:
+            `tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
+        """
+        warnings.warn(
+            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
+        )
+        return self.get_lm_head()
+
+    def get_prefix_bias_name(self) -> Union[None, str]:
+        """
+        Get the concatenated _prefix name of the bias from the model name to the parent layer
+
+        Return:
+            `str`: The _prefix name of the bias.
+        """
+        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+        return None
+
+    def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
+        """
+        Dict of bias attached to an LM head. The key represents the name of the bias attribute.
+
+        Return:
+            `tf.Variable`: The weights representing the bias, None if not an LM model.
+        """
+        if self.get_lm_head() is not None:
+            lm_head = self.get_lm_head()
+            try:
+                return lm_head.get_bias()
+            except AttributeError:
+                self.build_in_name_scope()
+
+                return lm_head.get_bias()
+        return None
+
+    def set_bias(self, value):
+        """
+        Set all the bias in the LM head.
+
+        Args:
+            value (`Dict[str, tf.Variable]`):
+                All the new bias attached to an LM head.
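+
+        Example (a hypothetical sketch; assumes the model has an LM head, so `get_bias()` is not None):
+
+        ```python
+        bias = model.get_bias()  # e.g. {"bias": <tf.Variable of shape (vocab_size,)>}
+        model.set_bias({name: tf.zeros_like(value) for name, value in bias.items()})
+        ```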
+ """ + if self.get_lm_head() is not None: + lm_head = self.get_lm_head() + try: + lm_head.set_bias(value) + except AttributeError: + self.build_in_name_scope() + lm_head.set_bias(value) + + def get_lm_head(self) -> tf.keras.layers.Layer: + """ + The LM Head layer. This method must be overwritten by all the models that have a lm head. + + Return: + `tf.keras.layers.Layer`: The LM head layer if the model has one, None if not. + """ + return None + + def resize_token_embeddings( + self, new_num_tokens: Optional[int] = None + ) -> Union[tf.keras.layers.Embedding, tf.Variable]: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Arguments: + new_num_tokens (`int`, *optional*): + The number of new tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just + returns a pointer to the input tokens without doing anything. + + Return: + `tf.Variable` or `tf.keras.layers.Embedding`: Pointer to the input tokens of the model. + """ + # TODO (joao): flagged for replacement (by `_v2_resized_token_embeddings`) due to embeddings refactor + + # Run the new code path if the model has a keras embeddings layer + if isinstance(self.get_input_embeddings(), tf.keras.layers.Embedding): + return self._v2_resized_token_embeddings(new_num_tokens) + + if new_num_tokens is None or new_num_tokens == self.config.vocab_size: + return self._get_word_embedding_weight(self.get_input_embeddings()) + + model_embeds = self._resize_token_embeddings(new_num_tokens) + + # Update base model and current model config + self.config.vocab_size = new_num_tokens + + return model_embeds + + def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int] = None) -> tf.keras.layers.Embedding: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Arguments: + new_num_tokens (`int`, *optional*): + The number of new tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just + returns a pointer to the input tokens without doing anything. + + Return: + `tf.keras.layers.Embedding`: Pointer to the input tokens of the model. 
+ """ + if new_num_tokens is None or new_num_tokens == self.config.vocab_size: + return self.get_input_embeddings() + + model_embeds = self._v2_resize_token_embeddings(new_num_tokens) + + # Update base model and current model config + self.config.vocab_size = new_num_tokens + + return model_embeds + + def _get_word_embedding_weight(model, embedding_layer): + # TODO (joao): flagged for delection due to embeddings refactor + + # If the variable holds the weights themselves, return them + if isinstance(embedding_layer, tf.Tensor): + return embedding_layer + # Otherwise, try to get them from the layer's attributes + + embeds = getattr(embedding_layer, "weight", None) + if embeds is not None: + return embeds + + embeds = getattr(embedding_layer, "decoder", None) + if embeds is not None: + return embeds + + # The reason why the attributes don't exist might be + # because the model is not built, so retry getting + # the argument after building the model + model.build_in_name_scope() + + embeds = getattr(embedding_layer, "weight", None) + if embeds is not None: + return embeds + + embeds = getattr(embedding_layer, "decoder", None) + if embeds is not None: + return embeds + + return None + + def _resize_token_embeddings(self, new_num_tokens): + # TODO (joao): flagged for replacement (by `_v2_resize_token_embeddings`) due to embeddings refactor + old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + + # if word embeddings are not tied, make sure that lm head bias is resized as well + if self.get_bias() is not None: + old_lm_head_bias = self.get_bias() + new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) + + self.set_bias(new_lm_head_bias) + + # if word embeddings are not tied, make sure that lm head decoder is resized as well + if self.get_output_embeddings() is not None: + old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) + new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) + + self.set_output_embeddings(new_lm_head_decoder) + + self.set_input_embeddings(new_embeddings) + + return self.get_input_embeddings() + + def _v2_resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._v2_get_resized_embeddings(old_embeddings, new_num_tokens) + self.set_input_embeddings(new_embeddings) + + # If word embeddings are not tied, make sure that lm head bias is resized as well + if self.get_bias() is not None: + old_lm_head_bias = self.get_bias() + new_lm_head_bias = self._v2_get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) + self.set_bias(new_lm_head_bias) + + # If word embeddings are not tied, make sure that lm head decoder is resized as well. + tied_weights = self.get_input_embeddings() == self.get_output_embeddings() + if self.get_output_embeddings() is not None and not tied_weights: + old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) + # TODO (joao): this one probably needs a v2 version with other models + new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) + self.set_output_embeddings(new_lm_head_decoder) + + return self.get_input_embeddings() + + def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): + """ + Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. 
+ Reducing the size will remove vectors from the end + + Args: + old_lm_head_bias (`tf.Variable`): + Old lm head bias to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the linear matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns None + + Return: + `tf.Variable`: Pointer to the resized bias. + """ + # TODO (joao): flagged for replacement (by `_v2_get_resized_lm_head_bias`) due to embeddings refactor + new_lm_head_bias = {} + + for attr, weight in old_lm_head_bias.items(): + first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) + size_diff = new_num_tokens - old_num_tokens + final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] + + # initialize new bias + if tf.math.greater(size_diff, 0): + padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] + current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) + num_tokens_to_copy = min(old_num_tokens, new_num_tokens) + mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] + bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) + bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) + else: + slice_from = [0] if first_dim is None else [0, 0] + current_bias = tf.slice( + weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) + ) + bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) + + new_bias = self.add_weight( + shape=final_shape, + initializer="zeros", + trainable=True, + name=weight.name.split(":")[0], + ) + init_bias = tf.where(bias_mask, current_bias, new_bias.value()) + + new_bias.assign(init_bias) + new_lm_head_bias[attr] = new_bias + + return new_lm_head_bias + + def _v2_get_resized_lm_head_bias( + self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int + ) -> Dict[str, tf.Tensor]: + """ + Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. + Reducing the size will remove vectors from the end + + Args: + old_lm_head_bias (`Dict[str, tf.Variable]`): + Old lm head bias to be resized. + new_num_tokens (`int`): + New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at + the end. Reducing the size will remove vectors from the end. + + Return: + `tf.Tensor`: Values for the resized bias. + """ + new_lm_head_bias = {} + + for attr, weight in old_lm_head_bias.items(): + # Determine the size difference (depending on the shape) + first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) + size_diff = new_num_tokens - old_num_tokens + + # Copy the old bias values to the new bias + if old_num_tokens > new_num_tokens: + new_bias = weight.value()[..., :new_num_tokens] + else: + padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] + new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape)) + + new_lm_head_bias[attr] = new_bias + return new_lm_head_bias + + def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): + """ + Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. 
+ Reducing the size will remove vectors from the end + + Args: + old_lm_head_decoder (`tf.Variable`): + Old lm head decoder to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the linear matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns None + + Return: + `tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input + ones. + """ + new_lm_head_decoder = old_lm_head_decoder + is_input_output_equals = tf.reduce_any( + self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder + ) + + if old_lm_head_decoder is not None and not is_input_output_equals: + old_embedding_dim = shape_list(old_lm_head_decoder)[1] + decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) + new_lm_head_decoder = self.add_weight( + shape=(new_num_tokens, old_embedding_dim), + initializer="zeros", + trainable=True, + name=old_lm_head_decoder.name.split(":")[0], + ) + init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) + + new_lm_head_decoder.assign(init_decoder) + + return new_lm_head_decoder + + def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: + """ + Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly + initialized vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_embeddings (`tf.Variable`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the input tokens + `tf.Variable` module of the model without doing anything. + + Return: + `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is + `None` + """ + # TODO (joao): flagged for replacement (by `_v2_get_resized_embeddings`) due to embeddings refactor + old_embedding_dim = shape_list(old_embeddings)[1] + init_range = getattr(self.config, "initializer_range", 0.02) + embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) + new_embeddings = self.add_weight( + name=old_embeddings.name.split(":")[0], + shape=[new_num_tokens, old_embedding_dim], + initializer=get_initializer(init_range), + dtype=tf.float32, + ) + init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) + + new_embeddings.assign(init_embeddings) + + return new_embeddings + + def _v2_get_resized_embeddings( + self, old_embeddings: tf.keras.layers.Embedding, new_num_tokens: int + ) -> tf.keras.layers.Embedding: + """ + Build a resized Embedding layer from a provided Embedding layer. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. + + Args: + old_embeddings (`tf.keras.layers.Embedding`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Return: + `tf.keras.layers.Embedding`: Resized Embedding layer. 
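+
+        Example of the copy semantics (a sketch with assumed sizes, not a test from the library):
+
+        ```python
+        # With an old (30000, 768) layer, resizing to 30100 copies rows 0..29999 and leaves the
+        # last 100 rows with their freshly initialized truncated-normal values.
+        new_layer = model._v2_get_resized_embeddings(model.get_input_embeddings(), 30100)
+        assert new_layer.input_dim == 30100
+        ```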
+ """ + + # Get the initialization range for the embeddings + init_range = 0.02 # default value + potential_initialization_variable_names = [ + "initializer_range", # most common + "initializer_factor", # e.g. T5 + "init_std", # e.g BART + ] + for var_name in potential_initialization_variable_names: + if hasattr(self.config, var_name): + init_range = getattr(self.config, var_name) + + # Get a new (initialized) embeddings layer + new_embeddings = tf.keras.layers.Embedding( + input_dim=new_num_tokens, + output_dim=old_embeddings.output_dim, + embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=init_range), + name=old_embeddings.embeddings.name[:-13], # exact same scoped name except "/embeddings:0" + ) + new_embeddings(tf.constant([[0]])) + + # Copy the old embeddings to the new embeddings + if old_embeddings.input_dim >= new_num_tokens: + init_embeddings = old_embeddings.embeddings[:new_num_tokens] + else: + init_embeddings = tf.concat( + [old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim :]], axis=0 + ) + new_embeddings.embeddings.assign(init_embeddings) + return new_embeddings + + def prune_heads(self, heads_to_prune): + """ + Prunes heads of the base model. + + Arguments: + heads_to_prune (`Dict[int, List[int]]`): + Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads + to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on + layer 1 and heads 2 and 3 on layer 2. + """ + raise NotImplementedError + + def save_pretrained( + self, + save_directory, + saved_model=False, + version=1, + push_to_hub=False, + signatures=None, + max_shard_size: Union[int, str] = "10GB", + create_pr: bool = False, + safe_serialization: bool = False, + token: Optional[Union[str, bool]] = None, + **kwargs, + ): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + [`~TFPreTrainedModel.from_pretrained`] class method. + + Arguments: + save_directory (`str`): + Directory to which to save. Will be created if it doesn't exist. + saved_model (`bool`, *optional*, defaults to `False`): + If the model has to be saved in saved model format as well or not. + version (`int`, *optional*, defaults to 1): + The version of the saved model. A saved model needs to be versioned in order to be properly loaded by + TensorFlow Serving as detailed in the official documentation + https://www.tensorflow.org/tfx/serving/serving_basic + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + signatures (`dict` or `tf.function`, *optional*): + Model's signature used for serving. This will be passed to the `signatures` argument of model.save(). + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size + lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). + + + + If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard + which will be bigger than `max_shard_size`. + + + + create_pr (`bool`, *optional*, defaults to `False`): + Whether or not to create a PR with the uploaded files or directly commit. 
+            safe_serialization (`bool`, *optional*, defaults to `False`):
+                Whether to save the model using `safetensors` or the traditional TensorFlow way (that uses `h5`).
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+        """
+        use_auth_token = kwargs.pop("use_auth_token", None)
+
+        if use_auth_token is not None:
+            warnings.warn(
+                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+                FutureWarning,
+            )
+            if token is not None:
+                raise ValueError(
+                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+                )
+            token = use_auth_token
+
+        if token is not None:
+            kwargs["token"] = token
+
+        if os.path.isfile(save_directory):
+            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+            return
+
+        os.makedirs(save_directory, exist_ok=True)
+
+        if push_to_hub:
+            commit_message = kwargs.pop("commit_message", None)
+            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+            repo_id = self._create_repo(repo_id, **kwargs)
+            files_timestamps = self._get_files_timestamps(save_directory)
+
+        if saved_model:
+            # If `torch_dtype` is in the config with a torch dtype class as the value, we need to change it to string.
+            # (Although TF doesn't care about this attribute, we can't just remove it or set it to `None`.)
+            if getattr(self.config, "torch_dtype", None) is not None and not isinstance(self.config.torch_dtype, str):
+                self.config.torch_dtype = str(self.config.torch_dtype).split(".")[1]
+            if signatures is None:
+                serving_default = self.serving.get_concrete_function(self.input_signature)
+                if any(spec.dtype == tf.int32 for spec in self.input_signature.values()):
+                    int64_spec = {
+                        key: tf.TensorSpec(
+                            shape=spec.shape, dtype=tf.int64 if spec.dtype == tf.int32 else spec.dtype, name=spec.name
+                        )
+                        for key, spec in self.input_signature.items()
+                    }
+                    int64_serving = self.serving.get_concrete_function(int64_spec)
+                    signatures = {"serving_default": serving_default, "int64_serving": int64_serving}
+                else:
+                    signatures = serving_default
+            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
+            self.save(saved_model_dir, include_optimizer=False, signatures=signatures)
+            logger.info(f"Saved model created in {saved_model_dir}")
+
+        # Save configuration file
+        self.config.architectures = [self.__class__.__name__[2:]]
+
+        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
+        # loaded from the Hub.
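+        # `custom_object_save` copies the Python file defining the custom class next to the weights, so
+        # that `from_pretrained(..., trust_remote_code=True)` can re-import the class later.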
+ if self._auto_class is not None:
+ custom_object_save(self, save_directory, config=self.config)
+
+ self.config.save_pretrained(save_directory)
+ if self.can_generate():
+ self.generation_config.save_pretrained(save_directory)
+
+ # If we save using the predefined names, we can load using `from_pretrained`
+ weights_name = SAFE_WEIGHTS_NAME if safe_serialization else TF2_WEIGHTS_NAME
+ output_model_file = os.path.join(save_directory, weights_name)
+
+ shards, index = tf_shard_checkpoint(self.weights, max_shard_size)
+
+ # Clean the folder from a previous save
+ for filename in os.listdir(save_directory):
+ full_filename = os.path.join(save_directory, filename)
+ # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
+ # in distributed settings to avoid race conditions.
+ weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
+ if (
+ filename.startswith(weights_no_suffix)
+ and os.path.isfile(full_filename)
+ and filename not in shards.keys()
+ ):
+ os.remove(full_filename)
+
+ if index is None:
+ if safe_serialization:
+ state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in self.weights}
+ safe_save_file(state_dict, output_model_file, metadata={"format": "tf"})
+ else:
+ self.save_weights(output_model_file)
+ logger.info(f"Model weights saved in {output_model_file}")
+ else:
+ save_index_file = os.path.join(save_directory, TF2_WEIGHTS_INDEX_NAME)
+ # Save the index as well
+ with open(save_index_file, "w", encoding="utf-8") as index_file:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ index_file.write(content)
+ logger.info(
+ f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
+ f"split in {len(shards)} checkpoint shards. You can find where each parameter has been saved in the "
+ f"index located at {save_index_file}."
+ )
+ for shard_file, shard in shards.items():
+ # Use a distinct name for the h5 handle so it does not shadow the `shard_file` loop variable
+ with h5py.File(os.path.join(save_directory, shard_file), mode="w") as shard_handle:
+ layers = []
+ for layer in sorted(shard, key=lambda x: x.name):
+ if "model." in layer.name or len(layer.name.split("/")) == 1:
+ layer_name = layer.name
+ else:
+ layer_name = "/".join(layer.name.split("/")[1:])
+ param_dset = shard_handle.create_dataset(
+ layer_name, layer.numpy().shape, dtype=layer.numpy().dtype
+ )
+ param_dset[:] = layer.numpy()
+ layers.append(layer_name.encode("utf8"))
+ save_attributes_to_hdf5_group(shard_handle, "layer_names", layers)
+
+ if push_to_hub:
+ self._upload_modified_files(
+ save_directory,
+ repo_id,
+ files_timestamps,
+ commit_message=commit_message,
+ token=token,
+ )
+
+ @classmethod
+ def from_pretrained(
+ cls,
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
+ *model_args,
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ ignore_mismatched_sizes: bool = False,
+ force_download: bool = False,
+ local_files_only: bool = False,
+ token: Optional[Union[str, bool]] = None,
+ revision: str = "main",
+ **kwargs,
+ ):
+ r"""
+ Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
+
+ The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
+ pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
+ task. 
+
+ The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
+ weights are discarded.
+
+ Parameters:
+ pretrained_model_name_or_path (`str`, *optional*):
+ Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
+ user or organization name, like `dbmdz/bert-base-german-cased`.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
+ case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
+ argument. This loading path is slower than converting the PyTorch model to a TensorFlow model
+ using the provided conversion scripts and loading the TensorFlow model afterwards.
+ - `None` if you are both providing the configuration and state dictionary (resp. with keyword
+ arguments `config` and `state_dict`).
+ model_args (sequence of positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+ config (`Union[PretrainedConfig, str]`, *optional*):
+ Can be either:
+
+ - an instance of a class derived from [`PretrainedConfig`],
+ - a string valid as input to [`~PretrainedConfig.from_pretrained`].
+
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
+ be automatically loaded when:
+
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
+ model).
+ - The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying the
+ save directory.
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
+ configuration JSON file named *config.json* is found in the directory.
+ from_pt (`bool`, *optional*, defaults to `False`):
+ Load the model weights from a PyTorch state_dict save file (see docstring of
+ `pretrained_model_name_or_path` argument).
+ ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
+ Whether or not to ignore (rather than raise an error for) weights from the checkpoint that do not have
+ the same size as the weights of the model (if for instance, you are instantiating a model with 10
+ labels from a checkpoint with 3 labels).
+ cache_dir (`str`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+ file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g.,
+ `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ output_loading_info (`bool`, *optional*, defaults to `False`):
+ Whether or not to also return a
+ dictionary containing missing keys, unexpected keys and error messages. 
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ Whether or not to only look at local files (e.g., not try downloading the model).
+ token (`str` or `bool`, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+
+
+
+
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/"`.
+
+
+
+ mirror (`str`, *optional*):
+ Mirror source to accelerate downloads in China. If you are from China and have an accessibility
+ problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
+ Please refer to the mirror site for more information.
+ subfolder (`str`, *optional*, defaults to `""`):
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+ specify the folder name here.
+ tf_to_pt_weight_rename (`Callable`, *optional*):
+ A function that is called to transform the names of weights during the PyTorch to TensorFlow
+ crossloading process. This is not necessary for most models, but is useful to allow composite models to
+ be crossloaded correctly.
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
+ automatically loaded:
+
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
+ already been done)
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
+ corresponds to a configuration attribute will be used to override said attribute with the
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
+ will be passed to the underlying model's `__init__` function.
+
+ Examples:
+
+ ```python
+ >>> from transformers import BertConfig, TFBertModel
+
+ >>> # Download model and configuration from huggingface.co and cache.
+ >>> model = TFBertModel.from_pretrained("bert-base-uncased")
+ >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
+ >>> model = TFBertModel.from_pretrained("./test/saved_model/")
+ >>> # Update configuration during loading.
+ >>> model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True)
+ >>> assert model.config.output_attentions == True
+ >>> # Loading from a PyTorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). 
+ >>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json") + >>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config) + ```""" + from_pt = kwargs.pop("from_pt", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + output_loading_info = kwargs.pop("output_loading_info", False) + use_auth_token = kwargs.pop("use_auth_token", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + _ = kwargs.pop("mirror", None) + load_weight_prefix = kwargs.pop("load_weight_prefix", None) + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) + tf_to_pt_weight_rename = kwargs.pop("tf_to_pt_weight_rename", None) + + # Not relevant for TF models + _ = kwargs.pop("adapter_kwargs", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if trust_remote_code is True: + logger.warning( + "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" + " ignored." + ) + + user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Load config if we don't provide a configuration + if not isinstance(config, PretrainedConfig): + config_path = config if config is not None else pretrained_model_name_or_path + config, model_kwargs = cls.config_class.from_pretrained( + config_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + _commit_hash=commit_hash, + **kwargs, + ) + else: + model_kwargs = kwargs + + if commit_hash is None: + commit_hash = getattr(config, "_commit_hash", None) + + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the + # index of the files. 
+ is_sharded = False + # Load model + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): + # Load from a PyTorch checkpoint in priority if from_pt + archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) + elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)): + # Load from a sharded PyTorch checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) + is_sharded = True + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): + # Load from a TF 2.0 checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)): + # Load from a sharded TF 2.0 checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME) + is_sharded = True + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + is_sharded = True + raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!") + # At this stage we don't have a weight file so we will raise an error. + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile( + os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) + ): + raise EnvironmentError( + f"Error no file named {TF2_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " + "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " + "weights." + ) + else: + raise EnvironmentError( + f"Error no file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " + f"{pretrained_model_name_or_path}." 
+ )
+ elif os.path.isfile(pretrained_model_name_or_path):
+ archive_file = pretrained_model_name_or_path
+ is_local = True
+ elif os.path.isfile(pretrained_model_name_or_path + ".index"):
+ archive_file = pretrained_model_name_or_path + ".index"
+ is_local = True
+ elif is_remote_url(pretrained_model_name_or_path):
+ filename = pretrained_model_name_or_path
+ resolved_archive_file = download_url(pretrained_model_name_or_path)
+ else:
+ # set correct filename
+ if from_pt:
+ filename = WEIGHTS_NAME
+ elif is_safetensors_available():
+ filename = SAFE_WEIGHTS_NAME
+ else:
+ filename = TF2_WEIGHTS_NAME
+
+ try:
+ # Load from URL or cache if already cached
+ cached_file_kwargs = {
+ "cache_dir": cache_dir,
+ "force_download": force_download,
+ "proxies": proxies,
+ "resume_download": resume_download,
+ "local_files_only": local_files_only,
+ "token": token,
+ "user_agent": user_agent,
+ "revision": revision,
+ "subfolder": subfolder,
+ "_raise_exceptions_for_missing_entries": False,
+ "_commit_hash": commit_hash,
+ }
+ resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
+
+ # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
+ # result when internet is up, the repo and revision exist, but the file does not.
+ if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME:
+ # Did not find the safetensors file, let's fallback to TF.
+ # No support for sharded safetensors yet, so we'll raise an error if that's all we find.
+ filename = TF2_WEIGHTS_NAME
+ resolved_archive_file = cached_file(
+ pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs
+ )
+ if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME:
+ # Maybe the checkpoint is sharded, we try to grab the index name in this case.
+ resolved_archive_file = cached_file(
+ pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs
+ )
+ if resolved_archive_file is not None:
+ is_sharded = True
+ if resolved_archive_file is None and filename == WEIGHTS_NAME:
+ # Maybe the checkpoint is sharded, we try to grab the index name in this case.
+ resolved_archive_file = cached_file(
+ pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs
+ )
+ if resolved_archive_file is not None:
+ is_sharded = True
+ if resolved_archive_file is None:
+ # Otherwise, maybe there is a PyTorch or Flax model file. We try those to give a helpful error
+ # message.
+ has_file_kwargs = {
+ "revision": revision,
+ "proxies": proxies,
+ "token": token,
+ }
+ if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs):
+ is_sharded = True
+ raise NotImplementedError(
+ "Support for sharded checkpoints using safetensors is coming soon!"
+ )
+ elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
+ raise EnvironmentError(
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
+ f" {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to"
+ " load this model from those weights."
+ )
+ else:
+ raise EnvironmentError(
+ f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME},"
+ f" {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}"
+ )
+
+ except EnvironmentError:
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
+ # to the original exception.
+ raise
+ except Exception:
+ # For any other exception, we throw a generic error. 
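+ # (i.e. anything raised by `cached_file` that is not already an `EnvironmentError`)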
+ + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" + ) + if is_local: + logger.info(f"loading weights file {archive_file}") + resolved_archive_file = archive_file + filename = resolved_archive_file.split(os.path.sep)[-1] + else: + logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") + else: + resolved_archive_file = None + + # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. + if is_sharded: + # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. + resolved_archive_file, _ = get_checkpoint_shard_files( + pretrained_model_name_or_path, + resolved_archive_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + _commit_hash=commit_hash, + ) + + safetensors_from_pt = False + if filename == SAFE_WEIGHTS_NAME: + with safe_open(resolved_archive_file, framework="tf") as f: + safetensors_metadata = f.metadata() + if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax"]: + raise OSError( + f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." + " Make sure you save your model with the `save_pretrained` method." + ) + safetensors_from_pt = safetensors_metadata.get("format") == "pt" + + config.name_or_path = pretrained_model_name_or_path + + # composed models, *e.g.* TFRag, require special treatment when it comes to loading + # pre-trained weights. + if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: + model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") + + # Instantiate model. + model = cls(config, *model_args, **model_kwargs) + + if tf_to_pt_weight_rename is None and hasattr(model, "tf_to_pt_weight_rename"): + # TODO Matt: This is a temporary workaround to allow weight renaming, but requires a method + # to be defined for each class that requires a rename. 
We can probably just have a class-level
+ # dict and a single top-level method or something and cut down a lot of boilerplate code
+ tf_to_pt_weight_rename = model.tf_to_pt_weight_rename
+
+ if from_pt:
+ from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
+
+ # Load from a PyTorch checkpoint
+ return load_pytorch_checkpoint_in_tf2_model(
+ model,
+ resolved_archive_file,
+ allow_missing_keys=True,
+ output_loading_info=output_loading_info,
+ _prefix=load_weight_prefix,
+ tf_to_pt_weight_rename=tf_to_pt_weight_rename,
+ )
+
+ # we might need to extend the variable scope for composite models
+ if load_weight_prefix is not None:
+ with tf.compat.v1.variable_scope(load_weight_prefix):
+ model.build_in_name_scope() # build the network with dummy inputs
+ else:
+ model.build_in_name_scope() # build the network with dummy inputs
+
+ if safetensors_from_pt:
+ from .modeling_tf_pytorch_utils import load_pytorch_state_dict_in_tf2_model
+
+ with safe_open(resolved_archive_file, framework="tf") as safetensors_archive:
+ # Load from a PyTorch checkpoint
+ # We load in TF format here because PT weights often need to be transposed, and this is much
+ # faster on GPU. Loading as numpy and transposing on CPU adds several seconds to load times.
+ return load_pytorch_state_dict_in_tf2_model(
+ model,
+ safetensors_archive,
+ tf_inputs=False, # No need to build the model again
+ allow_missing_keys=True,
+ output_loading_info=output_loading_info,
+ _prefix=load_weight_prefix,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ tf_to_pt_weight_rename=tf_to_pt_weight_rename,
+ )
+
+ # 'by_name' allows us to do transfer learning by skipping/adding layers
+ # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
+ try:
+ if is_sharded:
+ for file in resolved_archive_file:
+ assert os.path.isfile(file), f"Error retrieving files {file}"
+
+ missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights(
+ model,
+ resolved_archive_file,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ _prefix=load_weight_prefix,
+ )
+ else:
+ missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
+ model,
+ resolved_archive_file,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ _prefix=load_weight_prefix,
+ )
+ except OSError as e:
+ try:
+ with open(resolved_archive_file) as f:
+ if f.read().startswith("version"):
+ raise OSError(
+ "You seem to have cloned a repository without having git-lfs installed. Please install "
+ "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
+ "you cloned."
+ )
+ else:
+ raise ValueError from e
+ except (UnicodeDecodeError, ValueError):
+ raise OSError(
+ "Unable to load weights from h5 file. "
+ "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. 
" + ) + + if cls._keys_to_ignore_on_load_missing is not None: + for pat in cls._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + + if cls._keys_to_ignore_on_load_unexpected is not None: + for pat in cls._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" + f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" + " with another architecture (e.g. initializing a BertForSequenceClassification model from a" + " BertForPreTraining model).\n- This IS NOT expected if you are initializing" + f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" + " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." + ) + else: + logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n") + + if len(missing_keys) > 0: + logger.warning( + f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" + " TRAIN this model on a down-stream task to be able to use it for predictions and inference." + ) + elif len(mismatched_keys) == 0: + logger.warning( + f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at" + f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" + f" was trained on, you can already use {model.__class__.__name__} for predictions without further" + " training." + ) + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" + f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" + " to use it for predictions and inference." + ) + + # If it is a model with generation capabilities, attempt to load the generation config + if model.can_generate(): + try: + model.generation_config = GenerationConfig.from_pretrained( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + **kwargs, + ) + except OSError: + logger.info( + "Generation config file not found, using a generation config created from the model config." 
+ )
+ pass
+
+ if output_loading_info:
+ loading_info = {
+ "missing_keys": missing_keys,
+ "unexpected_keys": unexpected_keys,
+ "mismatched_keys": mismatched_keys,
+ }
+
+ return model, loading_info
+
+ return model
+
+ def push_to_hub(
+ self,
+ repo_id: str,
+ use_temp_dir: Optional[bool] = None,
+ commit_message: Optional[str] = None,
+ private: Optional[bool] = None,
+ max_shard_size: Optional[Union[int, str]] = "10GB",
+ token: Optional[Union[bool, str]] = None,
+ # (`use_auth_token` is deprecated: we have to keep it here as we don't have **kwargs)
+ use_auth_token: Optional[Union[bool, str]] = None,
+ create_pr: bool = False,
+ **base_model_card_args,
+ ) -> str:
+ """
+ Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`.
+
+ Parameters:
+ repo_id (`str`):
+ The name of the repository you want to push your model to. It should contain your organization name
+ when pushing to a given organization.
+ use_temp_dir (`bool`, *optional*):
+ Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub.
+ Will default to `True` if there is no directory named like `repo_id`, `False` otherwise.
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload model"`.
+ private (`bool`, *optional*):
+ Whether or not the repository created should be private.
+ token (`bool` or `str`, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
+ is not specified.
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
+ Only applicable for models. The maximum size for a checkpoint before being sharded. Each checkpoint
+ shard will then be smaller than this size. If expressed as a string, needs to be digits followed
+ by a unit (like `"5MB"`).
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether or not to create a PR with the uploaded files or directly commit.
+
+ Examples:
+
+ ```python
+ from transformers import TFAutoModel
+
+ model = TFAutoModel.from_pretrained("bert-base-cased")
+
+ # Push the model to your namespace with the name "my-finetuned-bert".
+ model.push_to_hub("my-finetuned-bert")
+
+ # Push the model to an organization with the name "my-finetuned-bert".
+ model.push_to_hub("huggingface/my-finetuned-bert")
+ ```
+ """
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if token is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ token = use_auth_token
+
+ if "repo_path_or_name" in base_model_card_args:
+ warnings.warn(
+ "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use "
+ "`repo_id` instead."
+ )
+ repo_id = base_model_card_args.pop("repo_path_or_name")
+ # Deprecation warning will be sent after for repo_url and organization
+ repo_url = base_model_card_args.pop("repo_url", None)
+ organization = base_model_card_args.pop("organization", None)
+
+ if os.path.isdir(repo_id):
+ working_dir = repo_id
+ repo_id = repo_id.split(os.path.sep)[-1]
+ else:
+ working_dir = repo_id.split("/")[-1]
+
+ repo_id = self._create_repo(
+ repo_id, private=private, token=token, repo_url=repo_url, organization=organization
+ )
+
+ if use_temp_dir is None:
+ use_temp_dir = not os.path.isdir(working_dir)
+
+ with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir:
+ files_timestamps = self._get_files_timestamps(work_dir)
+
+ # Save all files.
+ self.save_pretrained(work_dir, max_shard_size=max_shard_size)
+ if hasattr(self, "history") and hasattr(self, "create_model_card"):
+ # This is a Keras model and we might be able to fish out its History and make a model card out of it
+ card_args = {
+ "output_dir": work_dir,
+ "model_name": Path(repo_id).name,
+ }
+ # Let the caller-supplied kwargs override the defaults (a self-update here would be a no-op)
+ card_args.update(base_model_card_args)
+ self.create_model_card(**card_args)
+
+ self._upload_modified_files(
+ work_dir,
+ repo_id,
+ files_timestamps,
+ commit_message=commit_message,
+ token=token,
+ create_pr=create_pr,
+ )
+
+ @classmethod
+ def register_for_auto_class(cls, auto_class="TFAutoModel"):
+ """
+ Register this class with a given auto class. This should only be used for custom models as the ones in the
+ library are already mapped with an auto class.
+
+
+
+ This API is experimental and may have some slight breaking changes in the next releases.
+
+
+
+ Args:
+ auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`):
+ The auto class to register this new model with.
+ """
+ if not isinstance(auto_class, str):
+ auto_class = auto_class.__name__
+
+ import transformers.models.auto as auto_module
+
+ if not hasattr(auto_module, auto_class):
+ raise ValueError(f"{auto_class} is not a valid auto class.")
+
+ cls._auto_class = auto_class
+
+
+class TFConv1D(tf.keras.layers.Layer):
+ """
+ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
+
+ Basically works like a linear layer but the weights are transposed.
+
+ Args:
+ nf (`int`):
+ The number of output features.
+ nx (`int`):
+ The number of input features.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation to use to initialize the weights.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
+ """
+
+ def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
+ super().__init__(**kwargs)
+ self.nf = nf
+ self.nx = nx
+ self.initializer_range = initializer_range
+
+ def build(self, input_shape):
+ if self.built:
+ return
+ self.built = True
+ self.weight = self.add_weight(
+ "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
+ )
+ self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
+
+ def call(self, x):
+ bz, sl = shape_list(x)[:2]
+
+ x = tf.reshape(x, [-1, self.nx])
+ x = tf.matmul(x, self.weight) + self.bias
+
+ x = tf.reshape(x, [bz, sl, self.nf])
+
+ return x
+
+
+class TFSharedEmbeddings(tf.keras.layers.Layer):
+ r"""
+ Construct shared token embeddings. 
+
+ The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
+ modeling.
+
+ Args:
+ vocab_size (`int`):
+ The size of the vocabulary, e.g., the number of unique tokens.
+ hidden_size (`int`):
+ The size of the embedding vectors.
+ initializer_range (`float`, *optional*):
+ The standard deviation to use when initializing the weights. If no value is provided, it will default to
+ \\(1/\sqrt{hidden\_size}\\).
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
+ """
+
+ # TODO (joao): flagged for deletion due to embeddings refactor
+
+ def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range
+ warnings.warn(
+ "`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `tf.keras.layers.Embedding` instead.",
+ DeprecationWarning,
+ )
+
+ def build(self, input_shape):
+ """
+ Build shared token embedding layer Shared weights logic adapted from
+ https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
+ """
+ self.weight = self.add_weight(
+ "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
+ )
+ super().build(input_shape)
+
+ def get_config(self):
+ config = {
+ "vocab_size": self.vocab_size,
+ "hidden_size": self.hidden_size,
+ "initializer_range": self.initializer_range,
+ }
+ base_config = super().get_config()
+
+ return dict(list(base_config.items()) + list(config.items()))
+
+ def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
+ """
+ Get token embeddings of inputs or decode final hidden state.
+
+ Args:
+ inputs (`tf.Tensor`):
+ In embedding mode, should be an int64 tensor with shape `[batch_size, length]`.
+
+ In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`.
+ mode (`str`, defaults to `"embedding"`):
+ A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be
+ used as an embedding layer, the second one that the layer should be used as a linear decoder.
+
+ Returns:
+ `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length,
+ embedding_size]`.
+
+ In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`.
+
+ Raises:
+ ValueError: if `mode` is not valid.
+
+ Shared weights logic is adapted from
+ [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24).
+ """
+ if mode == "embedding":
+ return self._embedding(inputs)
+ elif mode == "linear":
+ return self._linear(inputs)
+ else:
+ raise ValueError(f"mode {mode} is not valid.")
+
+ def _embedding(self, input_ids):
+ """Applies embedding based on inputs tensor."""
+ return tf.gather(self.weight, input_ids)
+
+ def _linear(self, inputs):
+ """
+ Computes logits by running inputs through a linear layer.
+
+ Args:
+ inputs: A float32 tensor with shape [..., hidden_size]
+
+ Returns:
+ float32 tensor with shape [..., vocab_size]. 
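+
+ Example (an illustrative sketch, not from the original file; the sizes are arbitrary and the layer is
+ built on first call, dispatching through the public `mode="linear"` path):
+
+ ```python
+ >>> emb = TFSharedEmbeddings(vocab_size=10, hidden_size=4)
+ >>> hidden_states = tf.zeros((2, 3, 4))
+ >>> logits = emb(hidden_states, mode="linear")  # shape [2, 3, 10]
+ ```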
+ """ + first_dims = shape_list(inputs)[:-1] + x = tf.reshape(inputs, [-1, self.hidden_size]) + logits = tf.matmul(x, self.weight, transpose_b=True) + + return tf.reshape(logits, first_dims + [self.vocab_size]) + + +class TFSequenceSummary(tf.keras.layers.Layer): + """ + Compute a single vector summary of a sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model. Relevant arguments in the config class of the model are (refer to the actual + config class of your model for the default values it uses): + + - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: + + - `"last"` -- Take the last token hidden state (like XLNet) + - `"first"` -- Take the first token hidden state (like Bert) + - `"mean"` -- Take the mean of all tokens hidden states + - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) + - `"attn"` -- Not implemented now, use multi-head attention + + - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. + - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes + (otherwise to `config.hidden_size`). + - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, + another string or `None` will add no activation. + - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. + - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. + + initializer_range (`float`, defaults to 0.02): The standard deviation to use to initialize the weights. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`. + """ + + def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): + super().__init__(**kwargs) + + self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" + if self.summary_type == "attn": + # We should use a standard multi-head attention module with absolute positional embedding for that. + # Cf. 
https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
+ # We can probably just use the multi-head attention module of PyTorch >=1.1.0
+ raise NotImplementedError
+
+ self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
+ if self.has_summary:
+ if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
+ num_classes = config.num_labels
+ else:
+ num_classes = config.hidden_size
+ self.summary = tf.keras.layers.Dense(
+ num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
+ )
+
+ self.has_activation = False
+ activation_string = getattr(config, "summary_activation", None)
+ if activation_string is not None:
+ self.has_activation = True
+ self.activation = get_tf_activation(activation_string)
+
+ self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
+ if self.has_first_dropout:
+ self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
+
+ self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
+ if self.has_last_dropout:
+ self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
+ self.hidden_size = config.hidden_size
+
+ def call(self, inputs, cls_index=None, training=False):
+ if not isinstance(inputs, (dict, tuple, list)):
+ hidden_states = inputs
+ elif isinstance(inputs, (tuple, list)):
+ hidden_states = inputs[0]
+ cls_index = inputs[1] if len(inputs) > 1 else None
+ assert len(inputs) <= 2, "Too many inputs."
+ else:
+ hidden_states = inputs.get("hidden_states")
+ cls_index = inputs.get("cls_index", None)
+
+ if self.summary_type == "last":
+ output = hidden_states[:, -1]
+ elif self.summary_type == "first":
+ output = hidden_states[:, 0]
+ elif self.summary_type == "mean":
+ output = tf.reduce_mean(hidden_states, axis=1)
+ elif self.summary_type == "cls_index":
+ hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims]
+ if cls_index is None:
+ cls_index = tf.fill(
+ hidden_shape[:-2], hidden_shape[-2] - 1
+ ) # A tensor of shape [batch] or [batch, num choices] filled with the last token position (seq length - 1)
+ cls_shape = shape_list(cls_index)
+ if len(cls_shape) <= len(hidden_shape) - 2:
+ cls_index = tf.expand_dims(cls_index, axis=-1)
+ # else:
+ # cls_index = cls_index[..., tf.newaxis]
+ # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
+ # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
+ output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
+ output = tf.squeeze(
+ output, axis=len(hidden_shape) - 2
+ ) # shape of output: (batch, num choices, hidden_size)
+ elif self.summary_type == "attn":
+ raise NotImplementedError
+
+ if self.has_first_dropout:
+ output = self.first_dropout(output, training=training)
+
+ if self.has_summary:
+ output = self.summary(output)
+
+ if self.has_activation:
+ output = self.activation(output)
+
+ if self.has_last_dropout:
+ output = self.last_dropout(output, training=training)
+
+ return output
+
+ def build(self, input_shape):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "summary", None) is not None:
+ with tf.name_scope("summary"):
+ self.summary.build(self.hidden_size)
+
+
+def get_initializer(initializer_range: float = 0.02) -> tf.keras.initializers.TruncatedNormal:
+ """
+ Creates a `tf.keras.initializers.TruncatedNormal` with the given range. 
+ + Args: + initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range. + + Returns: + `tf.keras.initializers.TruncatedNormal`: The truncated normal initializer. + """ + return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) diff --git a/modified/modeling_utils.py b/modified/modeling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7e5d3e54e619e8472255235471367efabd3a9573 --- /dev/null +++ b/modified/modeling_utils.py @@ -0,0 +1,4841 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import collections +import copy +import functools +import gc +import importlib.metadata +import inspect +import json +import os +import re +import shutil +import tempfile +import warnings +from contextlib import contextmanager +from dataclasses import dataclass +from functools import partial, wraps +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from packaging import version +from torch import Tensor, nn +from torch.nn import CrossEntropyLoss, Identity +from torch.utils.checkpoint import checkpoint + +from .activations import get_activation +from .configuration_utils import PretrainedConfig +from .dynamic_module_utils import custom_object_save +from .generation import GenerationConfig, GenerationMixin +from .integrations import PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled +from .pytorch_utils import ( # noqa: F401 + Conv1D, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + id_tensor_storage, + prune_conv1d_layer, + prune_layer, + prune_linear_layer, +) +from .safetensors_conversion import auto_conversion +from .utils import ( + ADAPTER_SAFE_WEIGHTS_NAME, + ADAPTER_WEIGHTS_NAME, + CONFIG_NAME, + DUMMY_INPUTS, + FLAX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + TF2_WEIGHTS_NAME, + TF_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + ContextManagers, + ModelOutput, + PushToHubMixin, + cached_file, + copy_func, + download_url, + extract_commit_hash, + has_file, + is_accelerate_available, + is_auto_awq_available, + is_auto_gptq_available, + is_bitsandbytes_available, + is_flash_attn_2_available, + is_offline_mode, + is_optimum_available, + is_peft_available, + is_remote_url, + is_safetensors_available, + is_torch_sdpa_available, + is_torch_tpu_available, + logging, + replace_return_docstrings, + strtobool, +) +from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files +from .utils.import_utils import ( + ENV_VARS_TRUE_VALUES, + is_sagemaker_mp_enabled, + is_torch_fx_proxy, + is_torchdynamo_compiling, +) +from .utils.quantization_config import AwqConfig, BitsAndBytesConfig, GPTQConfig, QuantizationMethod +from .utils.versions import require_version_core + + +XLA_USE_BF16 = os.environ.get("XLA_USE_BF16", "0").upper() +XLA_DOWNCAST_BF16 = 
os.environ.get("XLA_DOWNCAST_BF16", "0").upper() + +if is_accelerate_available(): + from accelerate import dispatch_model, infer_auto_device_map, init_empty_weights + from accelerate.hooks import add_hook_to_module + from accelerate.utils import ( + check_tied_parameters_on_same_device, + find_tied_parameters, + get_balanced_memory, + get_max_memory, + load_offloaded_weights, + offload_weight, + save_offload_index, + set_module_tensor_to_device, + ) + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.torch import load_file as safe_load_file + from safetensors.torch import save_file as safe_save_file + +logger = logging.get_logger(__name__) + + +_init_weights = True + + +def is_fsdp_enabled(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and strtobool(os.environ.get("ACCELERATE_USE_FSDP", "False")) == 1 + and strtobool(os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING", "False")) == 1 + ) + + +def is_local_dist_rank_0(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and int(os.environ.get("LOCAL_RANK", -1)) == 0 + ) + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") +else: + IS_SAGEMAKER_MP_POST_1_10 = False + +if is_peft_available(): + from .utils import find_adapter_config_file + +TORCH_INIT_FUNCTIONS = { + "uniform_": nn.init.uniform_, + "normal_": nn.init.normal_, + "trunc_normal_": nn.init.trunc_normal_, + "constant_": nn.init.constant_, + "xavier_uniform_": nn.init.xavier_uniform_, + "xavier_normal_": nn.init.xavier_normal_, + "kaiming_uniform_": nn.init.kaiming_uniform_, + "kaiming_normal_": nn.init.kaiming_normal_, + "uniform": nn.init.uniform, + "normal": nn.init.normal, + "xavier_uniform": nn.init.xavier_uniform, + "xavier_normal": nn.init.xavier_normal, + "kaiming_uniform": nn.init.kaiming_uniform, + "kaiming_normal": nn.init.kaiming_normal, +} + + +@contextmanager +def no_init_weights(_enable=True): + """ + Context manager to globally disable weight initialization to speed up loading large models. + + TODO(Patrick): Delete safety argument `_enable=True` at next major version. . 
+ """ + global _init_weights + old_init_weights = _init_weights + + if _enable: + _init_weights = False + + def _skip_init(*args, **kwargs): + pass + + # # Save the original initialization functions + for name, init_func in TORCH_INIT_FUNCTIONS.items(): + setattr(torch.nn.init, name, _skip_init) + try: + yield + finally: + _init_weights = old_init_weights + if _enable: + # # Restore the original initialization functions + for name, init_func in TORCH_INIT_FUNCTIONS.items(): + setattr(torch.nn.init, name, init_func) + + +def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + try: + return next(parameter.parameters()).device + except StopIteration: + # For nn.DataParallel compatibility in PyTorch 1.5 + + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].device + + +def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + """ + Returns the first parameter dtype (can be non-floating) or asserts if none were found. + """ + try: + return next(parameter.parameters()).dtype + except StopIteration: + # For nn.DataParallel compatibility in PyTorch > 1.5 + + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].dtype + + +def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + """ + Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found. 
+ """ + last_dtype = None + for t in parameter.parameters(): + last_dtype = t.dtype + if t.is_floating_point(): + # Adding fix for https://github.com/pytorch/xla/issues/4152 + # Fixes issue where the model code passes a value that is out of range for XLA_USE_BF16=1 + # and XLA_DOWNCAST_BF16=1 so the conversion would cast it to -inf + # NOTE: `is_torch_tpu_available()` is checked last as it induces a graph break in torch dynamo + if XLA_USE_BF16 in ENV_VARS_TRUE_VALUES and is_torch_tpu_available(): + return torch.bfloat16 + if XLA_DOWNCAST_BF16 in ENV_VARS_TRUE_VALUES and is_torch_tpu_available(): + if t.dtype == torch.float: + return torch.bfloat16 + if t.dtype == torch.double: + return torch.float32 + return t.dtype + + if last_dtype is not None: + # if no floating dtype was found return whatever the first dtype is + return last_dtype + + # For nn.DataParallel compatibility in PyTorch > 1.5 + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + last_tuple = None + for tuple in gen: + last_tuple = tuple + if tuple[1].is_floating_point(): + return tuple[1].dtype + + if last_tuple is not None: + # fallback to the last dtype + return last_tuple[1].dtype + + # fallback to buffer dtype + for t in parameter.buffers(): + last_dtype = t.dtype + if t.is_floating_point(): + return t.dtype + return last_dtype + + +def get_state_dict_float_dtype(state_dict): + """ + Returns the first found floating dtype in `state_dict` or asserts if none were found. + """ + for t in state_dict.values(): + if t.is_floating_point(): + return t.dtype + + raise ValueError("couldn't find any floating point dtypes in state_dict") + + +def get_state_dict_dtype(state_dict): + """ + Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype. + """ + for t in state_dict.values(): + if t.is_floating_point(): + return t.dtype + + # if no floating dtype was found return whatever the first dtype is + else: + return next(state_dict.values()).dtype + + +def dtype_byte_size(dtype): + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. + + Example: + + ```py + >>> dtype_byte_size(torch.float32) + 4 + ``` + """ + if dtype == torch.bool: + return 1 / 8 + bit_search = re.search(r"[^\d](\d+)$", str(dtype)) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 + + +def shard_checkpoint( + state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME +): + """ + Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a + given size. + + The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no + optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the + limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], + [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's weight is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will + have a size greater than `max_shard_size`. 
+
+
+
+ Args:
+ state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
+ The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
+ (like `"5MB"`).
+ weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`):
+ The name of the model save file.
+ """
+ max_shard_size = convert_file_size_to_int(max_shard_size)
+
+ sharded_state_dicts = [{}]
+ last_block_size = 0
+ total_size = 0
+ storage_id_to_block = {}
+
+ for key, weight in state_dict.items():
+ # when bnb serialization is used the weights in the state dict can be strings
+ # check: https://github.com/huggingface/transformers/pull/24416 for more details
+ if isinstance(weight, str):
+ continue
+ else:
+ storage_id = id_tensor_storage(weight)
+
+ # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block`
+ if storage_id in storage_id_to_block:
+ block_id = storage_id_to_block[storage_id]
+ sharded_state_dicts[block_id][key] = weight
+ continue
+
+ weight_size = weight.numel() * dtype_byte_size(weight.dtype)
+
+ # If this weight is going to tip over the maximal size, we split, but only if we have put at least one
+ # weight in the current shard.
+ if last_block_size + weight_size > max_shard_size and len(sharded_state_dicts[-1]) > 0:
+ sharded_state_dicts.append({})
+ last_block_size = 0
+
+ sharded_state_dicts[-1][key] = weight
+ last_block_size += weight_size
+ total_size += weight_size
+ storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1
+
+ # If we only have one shard, we return it
+ if len(sharded_state_dicts) == 1:
+ return {weights_name: sharded_state_dicts[0]}, None
+
+ # Otherwise, let's build the index
+ weight_map = {}
+ shards = {}
+ for idx, shard in enumerate(sharded_state_dicts):
+ shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
+ shard_file = shard_file.replace(
+ ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors"
+ )
+ shards[shard_file] = shard
+ for key in shard.keys():
+ weight_map[key] = shard_file
+
+ # Add the metadata
+ metadata = {"total_size": total_size}
+ index = {"metadata": metadata, "weight_map": weight_map}
+ return shards, index
+
+
+def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True):
+ """
+ This is the same as
+ [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict)
+ but for a sharded checkpoint.
+
+ This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
+ loaded in the model.
+
+ Args:
+ model (`torch.nn.Module`): The model in which to load the checkpoint.
+ folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint.
+ strict (`bool`, *optional*, defaults to `True`):
+ Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
+ prefer_safe (`bool`, *optional*, defaults to `True`):
+ If both safetensors and PyTorch save files are present in checkpoint and `prefer_safe` is True, the
+ safetensors files will be loaded. Otherwise, PyTorch files are always loaded when possible. 
+
+    Returns:
+        `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields
+            - `missing_keys` is a list of str containing the missing keys
+            - `unexpected_keys` is a list of str containing the unexpected keys
+    """
+    # Load the index
+    index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
+    safe_index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME)
+
+    index_present = os.path.isfile(index_file)
+    safe_index_present = os.path.isfile(safe_index_file)
+
+    if not index_present and not (safe_index_present and is_safetensors_available()):
+        filenames = (
+            (WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME) if is_safetensors_available() else (WEIGHTS_INDEX_NAME,)
+        )
+        raise ValueError(f"Can't find a checkpoint index ({' or '.join(filenames)}) in {folder}.")
+
+    load_safe = False
+    if safe_index_present:
+        if prefer_safe:
+            if is_safetensors_available():
+                load_safe = True  # load safe due to preference
+            else:
+                logger.warning(
+                    f"Cannot load sharded checkpoint at {folder} safely since safetensors is not installed!"
+                )
+        elif not index_present:
+            load_safe = True  # load safe since we have no other choice
+
+    load_index = safe_index_file if load_safe else index_file
+
+    with open(load_index, "r", encoding="utf-8") as f:
+        index = json.load(f)
+
+    shard_files = list(set(index["weight_map"].values()))
+
+    # If strict=True, error before loading any of the state dicts.
+    loaded_keys = index["weight_map"].keys()
+    model_keys = model.state_dict().keys()
+    missing_keys = [key for key in model_keys if key not in loaded_keys]
+    unexpected_keys = [key for key in loaded_keys if key not in model_keys]
+    if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
+        error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}"
+        if len(missing_keys) > 0:
+            str_missing_keys = ",".join([f'"{k}"' for k in missing_keys])
+            error_message += f"\nMissing key(s): {str_missing_keys}."
+        if len(unexpected_keys) > 0:
+            str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys])
+            error_message += f"\nUnexpected key(s): {str_unexpected_keys}."
+        raise RuntimeError(error_message)
+
+    loader = safe_load_file if load_safe else partial(torch.load, map_location="cpu")
+
+    for shard_file in shard_files:
+        state_dict = loader(os.path.join(folder, shard_file))
+        model.load_state_dict(state_dict, strict=False)
+
+        # Make sure memory is freed before we load the next state dict.
+        del state_dict
+        gc.collect()
+
+    # Return the same thing as PyTorch load_state_dict function.
+    return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)
+
+
+def load_state_dict(checkpoint_file: Union[str, os.PathLike]):
+    """
+    Reads a PyTorch checkpoint file, returning properly formatted errors if they arise.
+    """
+    if checkpoint_file.endswith(".safetensors") and is_safetensors_available():
+        # Check format of the archive
+        with safe_open(checkpoint_file, framework="pt") as f:
+            metadata = f.metadata()
+        if metadata.get("format") not in ["pt", "tf", "flax"]:
+            raise OSError(
+                f"The safetensors archive passed at {checkpoint_file} does not contain valid metadata. Make sure "
+                "you save your model with the `save_pretrained` method."
+ ) + return safe_load_file(checkpoint_file) + try: + if ( + is_deepspeed_zero3_enabled() and torch.distributed.is_initialized() and torch.distributed.get_rank() > 0 + ) or (is_fsdp_enabled() and not is_local_dist_rank_0()): + map_location = "meta" + else: + map_location = "cpu" + + return torch.load(checkpoint_file, map_location=map_location) + except Exception as e: + try: + with open(checkpoint_file) as f: + if f.read(7) == "version": + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please install " + "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " + "you cloned." + ) + else: + raise ValueError( + f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " + "model. Make sure you have saved the model properly." + ) from e + except (UnicodeDecodeError, ValueError): + raise OSError( + f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' " + f"at '{checkpoint_file}'. " + "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True." + ) + + +def set_initialized_submodules(model, state_dict_keys): + """ + Sets the `_is_hf_initialized` flag in all submodules of a given model when all its weights are in the loaded state + dict. + """ + for module_name, module in model.named_modules(): + loaded_keys = [k.replace(f"{module_name}.", "") for k in state_dict_keys if k.startswith(f"{module_name}.")] + if len(set(module.state_dict().keys()) - set(loaded_keys)) == 0: + module._is_hf_initialized = True + + +def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): + # Convert old format to new format if needed from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, "_metadata", None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + error_msgs = [] + + # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants + # so we need to apply the function recursively. + def load(module: nn.Module, state_dict, prefix=""): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) + # Parameters of module and children will start with prefix. We can exit early if there are none in this + # state_dict + if len([key for key in state_dict if key.startswith(prefix)]) > 0: + if is_deepspeed_zero3_enabled(): + import deepspeed + + # In sharded models, each shard has only part of the full state_dict, so only gather + # parameters that are in the current state_dict. 
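+                # NOTE: `prefix` ends with a trailing dot whenever it is non-empty (it is extended below as
+                # `prefix + name + "."`), hence the `prefix[:-1]` when querying named parameters.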
+ named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) + params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] + if len(params_to_gather) > 0: + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): + if torch.distributed.get_rank() == 0: + module._load_from_state_dict(*args) + else: + module._load_from_state_dict(*args) + + for name, child in module._modules.items(): + if child is not None: + load(child, state_dict, prefix + name + ".") + + load(model_to_load, state_dict, prefix=start_prefix) + # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so + # it's safe to delete it. + del state_dict + + return error_msgs + + +def find_submodule_and_param_name(model, long_key, start_prefix): + """ + A helper util to find the last sub-module and the param/buffer name. If `start_prefix` is supplied it'll be removed + from the start of the key + """ + + if len(start_prefix) > 0 and long_key.startswith(start_prefix): + long_key = ".".join(long_key.split(".")[1:]) + + split_key = long_key.split(".") + submodule = model + while len(split_key) > 1: + if hasattr(submodule, split_key[0]): + submodule = getattr(submodule, split_key[0]) + del split_key[0] + else: + submodule = None + break + if submodule == model: + submodule = None + return submodule, split_key[0] + + +def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix): + """ + Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params. + + `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in + `bert.pooler.dense.weight` + + """ + + # dematerialize param storage for keys that are going to be replaced by state_dict, by + # putting those on the meta device + for k in loaded_state_dict_keys: + submodule, param_name = find_submodule_and_param_name(model, k, start_prefix) + if submodule is not None: + # selectively switch to the meta device only those params/buffers that will + # be next replaced from state_dict. This a complex way to do p.to_("meta") + # since we have no in-place to_ for tensors. + new_val = getattr(submodule, param_name) + if isinstance(new_val, torch.nn.Parameter): + # isinstance returns False for Params on meta device, so switch after the check + new_val = torch.nn.Parameter(new_val.to("meta")) + else: + new_val = new_val.to("meta") + setattr(submodule, param_name, new_val) + + +def _load_state_dict_into_meta_model( + model, + state_dict, + loaded_state_dict_keys, # left for now but could be removed, see below + start_prefix, + expected_keys, + device_map=None, + offload_folder=None, + offload_index=None, + state_dict_folder=None, + state_dict_index=None, + dtype=None, + is_quantized=False, + is_safetensors=False, + keep_in_fp32_modules=None, +): + """ + This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its + params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the + params back to the normal device, but only for `loaded_state_dict_keys`. + + `start_prefix` is used for models which insert their name into model keys, e.g. 
`bert` in + `bert.pooler.dense.weight` + + """ + + # XXX: remaining features to implement to be fully compatible with _load_state_dict_into_model + # - deepspeed zero 3 support + # - need to copy metadata if any - see _load_state_dict_into_model + # - handling error_msgs - mimicking the error handling in module._load_from_state_dict() + # - Is there a situation where some keys aren't in `loaded_state_dict_keys` and in which case + # they won't get loaded. + + if is_quantized: + from .integrations import set_module_quantized_tensor_to_device + + error_msgs = [] + + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + for param_name, param in state_dict.items(): + # First part of the test is always true as load_state_dict_keys always contains state_dict keys. + if param_name not in loaded_state_dict_keys or param_name not in expected_keys: + continue + + if param_name.startswith(start_prefix): + param_name = param_name[len(start_prefix) :] + + module_name = param_name + set_module_kwargs = {} + + # We convert floating dtypes to the `dtype` passed. We want to keep the buffers/params + # in int/uint/bool and not cast them. + if dtype is not None and torch.is_floating_point(param): + if ( + keep_in_fp32_modules is not None + and any( + module_to_keep_in_fp32 in param_name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules + ) + and dtype == torch.float16 + ): + param = param.to(torch.float32) + + # For backward compatibility with older versions of `accelerate` + # TODO: @sgugger replace this check with version check at the next `accelerate` release + if "dtype" in list(inspect.signature(set_module_tensor_to_device).parameters): + set_module_kwargs["dtype"] = torch.float32 + else: + param = param.to(dtype) + + # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model + if dtype is None: + old_param = model + splits = param_name.split(".") + for split in splits: + old_param = getattr(old_param, split) + if old_param is None: + break + + if old_param is not None: + param = param.to(old_param.dtype) + + set_module_kwargs["value"] = param + + if device_map is None: + param_device = "cpu" + else: + # find next higher level module that is defined in device_map: + # bert.lm_head.weight -> bert.lm_head -> bert -> '' + while len(module_name) > 0 and module_name not in device_map: + module_name = ".".join(module_name.split(".")[:-1]) + if module_name == "" and "" not in device_map: + # TODO: group all errors and raise at the end. 
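+                    # An empty `module_name` means we walked up to the root module without finding an
+                    # entry in `device_map`, and there is no catch-all "" entry either.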
+ raise ValueError(f"{param_name} doesn't have any device set.") + param_device = device_map[module_name] + + if param_device == "disk": + if not is_safetensors: + offload_index = offload_weight(param, param_name, offload_folder, offload_index) + elif param_device == "cpu" and state_dict_index is not None: + state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) + elif not is_quantized: + # For backward compatibility with older versions of `accelerate` + set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs) + else: + if param.dtype == torch.int8 and param_name.replace("weight", "SCB") in state_dict.keys(): + fp16_statistics = state_dict[param_name.replace("weight", "SCB")] + else: + fp16_statistics = None + + if "SCB" not in param_name: + set_module_quantized_tensor_to_device( + model, param_name, param_device, value=param, fp16_statistics=fp16_statistics + ) + + return error_msgs, offload_index, state_dict_index + + +def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: + if variant is not None: + splits = weights_name.split(".") + splits = splits[:-1] + [variant] + splits[-1:] + weights_name = ".".join(splits) + + return weights_name + + +class ModuleUtilsMixin: + """ + A few utilities for `torch.nn.Modules`, to be used as a mixin. + """ + + @staticmethod + def _hook_rss_memory_pre_forward(module, *args, **kwargs): + try: + import psutil + except ImportError: + raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") + + process = psutil.Process(os.getpid()) + mem = process.memory_info() + module.mem_rss_pre_forward = mem.rss + return None + + @staticmethod + def _hook_rss_memory_post_forward(module, *args, **kwargs): + try: + import psutil + except ImportError: + raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") + + process = psutil.Process(os.getpid()) + mem = process.memory_info() + module.mem_rss_post_forward = mem.rss + mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward + module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) + return None + + def add_memory_hooks(self): + """ + Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. + + Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero + with `model.reset_memory_hooks_state()`. + """ + for module in self.modules(): + module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) + module.register_forward_hook(self._hook_rss_memory_post_forward) + self.reset_memory_hooks_state() + + def reset_memory_hooks_state(self): + """ + Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). + """ + for module in self.modules(): + module.mem_rss_diff = 0 + module.mem_rss_post_forward = 0 + module.mem_rss_pre_forward = 0 + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + return get_parameter_device(self) + + @property + def dtype(self) -> torch.dtype: + """ + `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). 
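+
+        Example (illustrative; assumes `model` is an already-instantiated `PreTrainedModel`):
+
+        ```py
+        >>> model.dtype  # doctest: +SKIP
+        torch.float32
+        ```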
+ """ + return get_parameter_dtype(self) + + def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: + """ + Invert an attention mask (e.g., switches 0. and 1.). + + Args: + encoder_attention_mask (`torch.Tensor`): An attention mask. + + Returns: + `torch.Tensor`: The inverted attention mask. + """ + if encoder_attention_mask.dim() == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + if encoder_attention_mask.dim() == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition + # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow + # /transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = (encoder_extended_attention_mask == + # encoder_extended_attention_mask.transpose(-1, -2)) + encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min + + return encoder_extended_attention_mask + + @staticmethod + def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + else: + device = attention_mask.device + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + return extended_attention_mask + + def get_extended_attention_mask( + self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None + ) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + + Returns: + `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + if dtype is None: + dtype = self.dtype + + if not (attention_mask.dim() == 2 and self.config.is_decoder): + # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder` + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder: + extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( + input_shape, attention_mask, device + ) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min + return extended_attention_mask + + def get_head_mask( + self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False + ) -> Tensor: + """ + Prepare the head mask if needed. + + Args: + head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): + The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). + num_hidden_layers (`int`): + The number of hidden layers in the model. + is_attention_chunked (`bool`, *optional*, defaults to `False`): + Whether or not the attentions scores are computed by chunks or not. + + Returns: + `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with + `[None]` for each layer. + """ + if head_mask is not None: + head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) + if is_attention_chunked is True: + head_mask = head_mask.unsqueeze(-1) + else: + head_mask = [None] * num_hidden_layers + + return head_mask + + def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): + """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" + head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility + return head_mask + + def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: + """ + Get number of (optionally, trainable or non-embeddings) parameters in the module. + + Args: + only_trainable (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of trainable parameters + + exclude_embeddings (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of non-embeddings parameters + + Returns: + `int`: The number of parameters. 
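+
+        Example (a sketch; the returned count depends entirely on the checkpoint):
+
+        ```py
+        >>> model.num_parameters(only_trainable=True)  # doctest: +SKIP
+        ```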
+ """ + + if exclude_embeddings: + embedding_param_names = [ + f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) + ] + total_parameters = [ + parameter for name, parameter in self.named_parameters() if name not in embedding_param_names + ] + else: + total_parameters = list(self.parameters()) + + total_numel = [] + is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False) + if is_loaded_in_4bit: + if is_bitsandbytes_available(): + import bitsandbytes as bnb + else: + raise ValueError( + "bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong" + " make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. " + ) + + for param in total_parameters: + if param.requires_grad or not only_trainable: + # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are + # used for the 4bit quantization (uint8 tensors are stored) + if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): + total_numel.append(param.numel() * 2) + else: + total_numel.append(param.numel()) + + return sum(total_numel) + + def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: + """ + Helper function to estimate the total number of tokens from the model inputs. + + Args: + inputs (`dict`): The model inputs. + + Returns: + `int`: The total number of tokens. + """ + if not hasattr(self, "warnings_issued"): + self.warnings_issued = {} + if self.main_input_name in input_dict: + return input_dict[self.main_input_name].numel() + elif "estimate_tokens" not in self.warnings_issued: + logger.warning( + "Could not estimate the number of tokens of the input, floating-point operations will not be computed" + ) + self.warnings_issued["estimate_tokens"] = True + return 0 + + def floating_point_ops( + self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True + ) -> int: + """ + Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a + batch with this transformer model. Default approximation neglects the quadratic dependency on the number of + tokens (valid if `12 * d_model << sequence_length`) as laid out in [this + paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter + re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. + + Args: + batch_size (`int`): + The batch size for the forward pass. + + sequence_length (`int`): + The number of tokens in each line of the batch. + + exclude_embeddings (`bool`, *optional*, defaults to `True`): + Whether or not to count embedding and softmax operations. + + Returns: + `int`: The number of floating-point operations. + """ + + return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) + + +class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): + r""" + Base class for all models. + + [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, + downloading and saving models as well as a few methods common to all models to: + + - resize the input embeddings, + - prune heads in the self-attention heads. 
+ + Class attributes (overridden by derived classes): + + - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class + for this model architecture. + - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model, + taking as arguments: + + - **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint. + - **config** ([`PreTrainedConfig`]) -- An instance of the configuration associated to the model. + - **path** (`str`) -- A path to the TensorFlow checkpoint. + + - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived + classes of the same architecture adding modules on top of the base model. + - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. + - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP + models, `pixel_values` for vision models and `input_values` for speech models). + """ + + config_class = None + base_model_prefix = "" + main_input_name = "input_ids" + _auto_class = None + _no_split_modules = None + _skip_keys_device_placement = None + _keep_in_fp32_modules = None + + # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing + # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings. + _keys_to_ignore_on_load_missing = None + # a list of `re` patterns of `state_dict` keys that should be removed from the list of + # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary + # warnings. + _keys_to_ignore_on_load_unexpected = None + # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't + # trained, but which are either deterministic or tied variables) + _keys_to_ignore_on_save = None + # a list of `state_dict` keys that are potentially tied to another key in the state_dict. + _tied_weights_keys = None + + is_parallelizable = False + supports_gradient_checkpointing = False + + # Flash Attention 2 support + _supports_flash_attn_2 = False + + # SDPA support + _supports_sdpa = False + + # Has support for a `Cache` instance as `past_key_values` + _supports_cache_class = False + + @property + def dummy_inputs(self) -> Dict[str, torch.Tensor]: + """ + `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. + """ + return {"input_ids": torch.tensor(DUMMY_INPUTS)} + + @property + def framework(self) -> str: + """ + :str: Identifies that this is a PyTorch model. + """ + return "pt" + + def __init__(self, config: PretrainedConfig, *inputs, **kwargs): + super().__init__() + if not isinstance(config, PretrainedConfig): + raise ValueError( + f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " + "`PretrainedConfig`. 
To create a model from a pretrained model use "
+                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
+            )
+        # Save config and origin of the pretrained weights if given in model
+        config = self._autoset_attn_implementation(
+            config, torch_dtype=torch.get_default_dtype(), check_device_map=False
+        )
+        self.config = config
+
+        self.name_or_path = config.name_or_path
+        self.warnings_issued = {}
+        self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
+        # Overwrite the class attribute to make it an instance attribute, so models like
+        # `InstructBlipForConditionalGeneration` can dynamically update it without modifying the class attribute
+        # when a different component (e.g. language_model) is used.
+        self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules)
+
+    def post_init(self):
+        """
+        A method executed at the end of each Transformer model initialization, to execute code that needs the model's
+        modules properly initialized (such as weight initialization).
+        """
+        self.init_weights()
+        self._backward_compatibility_gradient_checkpointing()
+
+    def _backward_compatibility_gradient_checkpointing(self):
+        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
+            self.gradient_checkpointing_enable()
+            # Remove the attribute now that it has been consumed, so it's not saved in the config.
+            delattr(self.config, "gradient_checkpointing")
+
+    @classmethod
+    def _from_config(cls, config, **kwargs):
+        """
+        All context managers that the model should be initialized under go here.
+
+        Args:
+            torch_dtype (`torch.dtype`, *optional*):
+                Override the default `torch.dtype` and load the model under this dtype.
+        """
+        torch_dtype = kwargs.pop("torch_dtype", None)
+        use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False)
+
+        # override default dtype if needed
+        dtype_orig = None
+        if torch_dtype is not None:
+            dtype_orig = cls._set_default_torch_dtype(torch_dtype)
+
+        config = copy.deepcopy(config)  # We do not want to modify the config inplace in _from_config.
+        config._attn_implementation = kwargs.pop("attn_implementation", None)
+        config = cls._autoset_attn_implementation(
+            config, use_flash_attention_2=use_flash_attention_2, check_device_map=False
+        )
+
+        if is_deepspeed_zero3_enabled():
+            import deepspeed
+
+            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
+            # this immediately partitions the model across all gpus, to avoid the overhead in time
+            # and memory copying it on CPU or each GPU first
+            with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()):
+                model = cls(config, **kwargs)
+        else:
+            model = cls(config, **kwargs)
+
+        # restore default dtype if it was modified
+        if dtype_orig is not None:
+            torch.set_default_dtype(dtype_orig)
+
+        return model
+
+    @classmethod
+    def _autoset_attn_implementation(
+        cls,
+        config,
+        use_flash_attention_2: bool = False,
+        torch_dtype: Optional[torch.dtype] = None,
+        device_map: Optional[Union[str, Dict[str, int]]] = None,
+        check_device_map: bool = True,
+    ):
+        """
+        Automatically checks and dispatches to a default attention implementation. In order of priority:
+            1. An implementation specified in `config._attn_implementation` (due for example to the argument attn_implementation="sdpa" in from_pretrained).
+            2. DEPRECATED: if use_flash_attention_2 is set to `True` and `flash_attn` is available, flash attention. (`LlamaFlashAttention` for example)
+            3. SDPA implementation, if available and supported by the model type. (`LlamaSdpaAttention` for example)
+            4. The default model's implementation otherwise (`LlamaAttention` for example).
+        """
+        # Here we use config._attn_implementation_internal to check whether the attention implementation was explicitly set by the user.
+        # The property `PretrainedConfig._attn_implementation` is never `None`, for backward compatibility (always fall back on "eager").
+        # The `hasattr` here is used as some Transformers tests for some reason do not call PretrainedConfig __init__ (e.g. test_no_super_init_config_and_model)
+        requested_attn_implementation = None
+        if hasattr(config, "_attn_implementation_internal") and config._attn_implementation_internal is not None:
+            if config._attn_implementation != "flash_attention_2" and use_flash_attention_2:
+                raise ValueError(
+                    f'Both attn_implementation="{config._attn_implementation}" and `use_flash_attention_2=True` were used when loading the model, which are not compatible.'
+                    ' We recommend to just use `attn_implementation="flash_attention_2"` when loading the model.'
+                )
+
+            if config._attn_implementation not in ["eager", "sdpa", "flash_attention_2"]:
+                message = f'Specified `attn_implementation="{config._attn_implementation}"` is not supported. The only possible arguments are `attn_implementation="eager"` (manual attention implementation)'
+                if cls._supports_flash_attn_2:
+                    message += ', `"attn_implementation=flash_attention_2"` (implementation using flash attention 2)'
+                if cls._supports_sdpa:
+                    message += ', `"attn_implementation=sdpa"` (implementation using torch.nn.functional.scaled_dot_product_attention)'
+                raise ValueError(message + ".")
+
+            # If a config is passed with a preset attn_implementation, we skip the automatic dispatch and use the user-provided config, with hard checks that the requested attention implementation is available.
+            requested_attn_implementation = config._attn_implementation_internal
+
+        if use_flash_attention_2:
+            logger.warning_once(
+                'The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.'
+            )
+            config._attn_implementation = "flash_attention_2"
+
+        if config._attn_implementation == "flash_attention_2":
+            cls._check_and_enable_flash_attn_2(
+                config,
+                torch_dtype=torch_dtype,
+                device_map=device_map,
+                hard_check_only=False,
+                check_device_map=check_device_map,
+            )
+        elif requested_attn_implementation in [None, "sdpa"]:
+            # use_flash_attention_2 takes priority over SDPA, hence SDPA treated in this elif.
+            config = cls._check_and_enable_sdpa(
+                config, hard_check_only=False if requested_attn_implementation is None else True
+            )
+        else:
+            config._attn_implementation = "eager"
+
+        return config
+
+    @classmethod
+    def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype:
+        """
+        Change the default dtype and return the previous one. This is needed when wanting to instantiate the model
+        under a specific dtype.
+
+        Args:
+            dtype (`torch.dtype`):
+                A floating dtype to set as the default.
+
+        Returns:
+            `torch.dtype`: the original default `dtype`, so that it can be restored with
+            `torch.set_default_dtype(dtype_orig)` once the model has been instantiated.
+
+        Note `set_default_dtype` currently only works with floating-point types and asserts if for example,
+        `torch.int64` is passed. So if a non-float `dtype` is passed this function will throw an exception.
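+
+        Example (a minimal sketch of the intended save/restore pattern):
+
+        ```py
+        >>> dtype_orig = cls._set_default_torch_dtype(torch.float16)  # doctest: +SKIP
+        >>> model = cls(config)  # instantiated under torch.float16  # doctest: +SKIP
+        >>> torch.set_default_dtype(dtype_orig)  # doctest: +SKIP
+        ```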
+ """ + if not dtype.is_floating_point: + raise ValueError( + f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" + ) + + logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") + dtype_orig = torch.get_default_dtype() + torch.set_default_dtype(dtype) + return dtype_orig + + @property + def base_model(self) -> nn.Module: + """ + `torch.nn.Module`: The main body of the model. + """ + return getattr(self, self.base_model_prefix, self) + + @classmethod + def can_generate(cls) -> bool: + """ + Returns whether this model can generate sequences with `.generate()`. + + Returns: + `bool`: Whether this model can generate sequences with `.generate()`. + """ + # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. + # Alternativelly, the model can also have a custom `generate` function. + if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): + return False + return True + + @classmethod + def _check_and_enable_flash_attn_2( + cls, + config, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + check_device_map: bool = True, + hard_check_only: bool = False, + ) -> PretrainedConfig: + """ + Checks the availability of Flash Attention 2 and compatibility with the current model. + + If all checks pass and `hard_check_only` is False, the method will set the config attribute `attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module. + """ + if not cls._supports_flash_attn_2: + raise ValueError( + f"{cls.__name__} does not support Flash Attention 2.0 yet. Please open an issue on GitHub to " + "request support for this architecture: https://github.com/huggingface/transformers/issues/new" + ) + + if not is_flash_attn_2_available(): + preface = "FlashAttention2 has been toggled on, but it cannot be used due to the following error:" + install_message = "Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2." + + if importlib.util.find_spec("flash_attn") is None: + raise ImportError(f"{preface} the package flash_attn seems to be not installed. {install_message}") + + flash_attention_version = version.parse(importlib.metadata.version("flash_attn")) + if torch.version.cuda: + if flash_attention_version < version.parse("2.1.0"): + raise ImportError( + f"{preface} you need flash_attn package version to be greater or equal than 2.1.0. Detected version {flash_attention_version}. {install_message}" + ) + else: + raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}") + elif torch.version.hip: + if flash_attention_version < version.parse("2.0.4"): + raise ImportError( + f"{preface} you need flash_attn package version to be greater or equal than 2.0.4. Make sure to have that version installed - detected version {flash_attention_version}. {install_message}" + ) + else: + raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}") + + _is_bettertransformer = getattr(cls, "use_bettertransformer", False) + + if _is_bettertransformer: + raise ValueError( + "Flash Attention 2 and BetterTransformer API are not compatible. 
Please make sure to disable BetterTransformer by doing model.reverse_bettertransformer()"
+            )
+
+        if torch_dtype is None:
+            logger.warning(
+                "You are attempting to use Flash Attention 2.0 without specifying a torch dtype. This might lead to unexpected behaviour"
+            )
+        elif torch_dtype not in [torch.float16, torch.bfloat16]:
+            raise ValueError(
+                f"Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes. You passed {torch_dtype}, this might lead to"
+                " unexpected behaviour."
+            )
+
+        # The check `torch.empty(0).device.type != "cuda"` is needed as the model may be initialized after `torch.set_default_device` has been called,
+        # or the model may be initialized under the context manager `with torch.device("cuda"):`.
+        if check_device_map and device_map is None and torch.empty(0).device.type != "cuda":
+            if torch.cuda.is_available():
+                logger.warning(
+                    "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU"
+                    " after initializing it on CPU with `model.to('cuda')`."
+                )
+            else:
+                raise ValueError(
+                    "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU and with no GPU available. "
+                    "This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map "
+                    "or initialising the model on CPU and then moving it to GPU."
+                )
+        elif (
+            check_device_map
+            and device_map is not None
+            and isinstance(device_map, dict)
+            and ("cpu" in device_map.values() or "disk" in device_map.values())
+        ):
+            raise ValueError(
+                "You are attempting to use Flash Attention 2.0 with a model dispatched on CPU or disk. This is not supported. Please make sure to "
+                "initialise the model on a GPU by passing a device_map that contains only GPU devices as values."
+            )
+        if not hard_check_only:
+            config._attn_implementation = "flash_attention_2"
+        return config
+
+    @classmethod
+    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
+        """
+        Checks the availability of SDPA for a given model.
+
+        If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "sdpa" so that the model can initialize the correct attention module.
+        """
+        if hard_check_only:
+            if not cls._supports_sdpa:
+                raise ValueError(
+                    f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please open an issue on GitHub to "
+                    "request support for this architecture: https://github.com/huggingface/transformers/issues/new"
+                )
+            if not is_torch_sdpa_available():
+                raise ImportError(
+                    "PyTorch SDPA requirements in Transformers are not met. Please install torch>=2.1.1."
+                )
+
+        if not is_torch_sdpa_available() or not cls._supports_sdpa:
+            return config
+
+        _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
+        if _is_bettertransformer:
+            return config
+
+        if not hard_check_only:
+            config._attn_implementation = "sdpa"
+        return config
+
+    def enable_input_require_grads(self):
+        """
+        Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
+        the model weights fixed.
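+
+        Example (a sketch of a typical adapter fine-tuning setup):
+
+        ```py
+        >>> model.enable_input_require_grads()  # doctest: +SKIP
+        >>> # ... fine-tune adapter weights, e.g. with gradient checkpointing enabled ...
+        >>> model.disable_input_require_grads()  # doctest: +SKIP
+        ```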
+ """ + + def make_inputs_require_grads(module, input, output): + output.requires_grad_(True) + + self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) + + def disable_input_require_grads(self): + """ + Removes the `_require_grads_hook`. + """ + self._require_grads_hook.remove() + + def get_input_embeddings(self) -> nn.Module: + """ + Returns the model's input embeddings. + + Returns: + `nn.Module`: A torch module mapping vocabulary to hidden states. + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + return base_model.get_input_embeddings() + else: + raise NotImplementedError + + def set_input_embeddings(self, value: nn.Module): + """ + Set model's input embeddings. + + Args: + value (`nn.Module`): A module mapping vocabulary to hidden states. + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + base_model.set_input_embeddings(value) + else: + raise NotImplementedError + + def get_output_embeddings(self) -> nn.Module: + """ + Returns the model's output embeddings. + + Returns: + `nn.Module`: A torch module mapping hidden states to vocabulary. + """ + return None # Overwrite for models with output embeddings + + def _init_weights(self, module): + """ + Initialize the weights. This method should be overridden by derived class and is + the only initialization method that will be called when loading a checkpoint + using `from_pretrained`. Any attempt to initialize outside of this function + will be useless as the torch.nn.init function are all replaced with skip. + """ + pass + + def _initialize_weights(self, module): + """ + Initialize the weights if they are not already initialized. + """ + if getattr(module, "_is_hf_initialized", False): + return + self._init_weights(module) + module._is_hf_initialized = True + + def tie_weights(self): + """ + Tie the weights between the input embeddings and the output embeddings. + + If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the + weights instead. + """ + if getattr(self.config, "tie_word_embeddings", True): + output_embeddings = self.get_output_embeddings() + if output_embeddings is not None: + self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) + + if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False): + if hasattr(self, self.base_model_prefix): + self = getattr(self, self.base_model_prefix) + self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix) + + for module in self.modules(): + if hasattr(module, "_tie_weights"): + module._tie_weights() + + @staticmethod + def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str): + uninitialized_encoder_weights: List[str] = [] + if decoder.__class__ != encoder.__class__: + logger.info( + f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder" + " weights are correctly initialized." 
+            )
+
+        def tie_encoder_to_decoder_recursively(
+            decoder_pointer: nn.Module,
+            encoder_pointer: nn.Module,
+            module_name: str,
+            uninitialized_encoder_weights: List[str],
+            depth=0,
+        ):
+            assert isinstance(decoder_pointer, nn.Module) and isinstance(
+                encoder_pointer, nn.Module
+            ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module"
+            if hasattr(decoder_pointer, "weight"):
+                assert hasattr(encoder_pointer, "weight")
+                encoder_pointer.weight = decoder_pointer.weight
+                if hasattr(decoder_pointer, "bias"):
+                    assert hasattr(encoder_pointer, "bias")
+                    encoder_pointer.bias = decoder_pointer.bias
+                return
+
+            encoder_modules = encoder_pointer._modules
+            decoder_modules = decoder_pointer._modules
+            if len(decoder_modules) > 0:
+                assert (
+                    len(encoder_modules) > 0
+                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
+
+                all_encoder_weights = {module_name + "/" + sub_name for sub_name in encoder_modules.keys()}
+                encoder_layer_pos = 0
+                for name, module in decoder_modules.items():
+                    if name.isdigit():
+                        encoder_name = str(int(name) + encoder_layer_pos)
+                        decoder_name = name
+                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
+                            encoder_modules
+                        ) != len(decoder_modules):
+                            # this can happen if the name corresponds to the position in a module list of layers
+                            # in this case the decoder has added a cross-attention layer that the encoder does not have
+                            # thus skip this step and subtract one layer position from the encoder
+                            encoder_layer_pos -= 1
+                            continue
+                    elif name not in encoder_modules:
+                        continue
+                    elif depth > 500:
+                        raise ValueError(
+                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is"
+                            " a circular dependency between two or more `nn.Modules` of your model."
+                        )
+                    else:
+                        decoder_name = encoder_name = name
+                    tie_encoder_to_decoder_recursively(
+                        decoder_modules[decoder_name],
+                        encoder_modules[encoder_name],
+                        module_name + "/" + name,
+                        uninitialized_encoder_weights,
+                        depth=depth + 1,
+                    )
+                    all_encoder_weights.remove(module_name + "/" + encoder_name)
+
+                uninitialized_encoder_weights += list(all_encoder_weights)
+
+        # tie weights recursively
+        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
+        if len(uninitialized_encoder_weights) > 0:
+            logger.warning(
+                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
+            )
+
+    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
+        """Tie or clone module weights depending on whether we are using TorchScript or not"""
+        if self.config.torchscript:
+            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
+        else:
+            output_embeddings.weight = input_embeddings.weight
+
+        if getattr(output_embeddings, "bias", None) is not None:
+            output_embeddings.bias.data = nn.functional.pad(
+                output_embeddings.bias.data,
+                (
+                    0,
+                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
+                ),
+                "constant",
+                0,
+            )
+        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
+            output_embeddings.out_features = input_embeddings.num_embeddings
+
+    def _get_no_split_modules(self, device_map: str):
+        """
+        Get the modules of the model that should not be split when using device_map. We iterate through the modules to
+        get the underlying `_no_split_modules`.
+
+        Args:
+            device_map (`str`):
+                The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"]
+
+        Returns:
+            `List[str]`: List of modules that should not be split
+        """
+        _no_split_modules = set()
+        modules_to_check = [self]
+        while len(modules_to_check) > 0:
+            module = modules_to_check.pop(-1)
+            # if the module does not appear in _no_split_modules, we also check the children
+            if module.__class__.__name__ not in _no_split_modules:
+                if isinstance(module, PreTrainedModel):
+                    if module._no_split_modules is None:
+                        raise ValueError(
+                            f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model "
+                            "class needs to implement the `_no_split_modules` attribute."
+                        )
+                    else:
+                        _no_split_modules = _no_split_modules | set(module._no_split_modules)
+                modules_to_check += list(module.children())
+        return list(_no_split_modules)
+
+    def resize_token_embeddings(
+        self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None
+    ) -> nn.Embedding:
+        """
+        Resizes the input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
+
+        Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
+
+        Arguments:
+            new_num_tokens (`int`, *optional*):
+                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
+                vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
+                returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
+            pad_to_multiple_of (`int`, *optional*):
+                If set, will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set
+                to `None`, will just pad the embedding to a multiple of `pad_to_multiple_of`.
+
+                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
+                details about this, or help on choosing the correct value for resizing, refer to this guide:
+                https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
+
+        Return:
+            `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
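+
+        Example (a sketch; `tokenizer` and the added token are illustrative):
+
+        ```py
+        >>> tokenizer.add_tokens(["<extra_token>"])  # doctest: +SKIP
+        >>> model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)  # doctest: +SKIP
+        ```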
+ """ + model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + if new_num_tokens is None and pad_to_multiple_of is None: + return model_embeds + + # Update base model and current model config + self.config.vocab_size = model_embeds.weight.shape[0] + self.vocab_size = model_embeds.weight.shape[0] + + # Tie weights again if needed + self.tie_weights() + + return model_embeds + + def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) + if hasattr(old_embeddings, "_hf_hook"): + hook = old_embeddings._hf_hook + add_hook_to_module(new_embeddings, hook) + old_embeddings_requires_grad = old_embeddings.weight.requires_grad + new_embeddings.requires_grad_(old_embeddings_requires_grad) + self.set_input_embeddings(new_embeddings) + + # Update new_num_tokens with the actual size of new_embeddings + if pad_to_multiple_of is not None: + if is_deepspeed_zero3_enabled(): + import deepspeed + + with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): + new_num_tokens = new_embeddings.weight.shape[0] + else: + new_num_tokens = new_embeddings.weight.shape[0] + + # if word embeddings are not tied, make sure that lm head is resized as well + if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: + old_lm_head = self.get_output_embeddings() + new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) + if hasattr(old_lm_head, "_hf_hook"): + hook = old_lm_head._hf_hook + add_hook_to_module(new_lm_head, hook) + old_lm_head_requires_grad = old_lm_head.weight.requires_grad + new_lm_head.requires_grad_(old_lm_head_requires_grad) + self.set_output_embeddings(new_lm_head) + + return self.get_input_embeddings() + + def _get_resized_embeddings( + self, + old_embeddings: nn.Embedding, + new_num_tokens: Optional[int] = None, + pad_to_multiple_of: Optional[int] = None, + ) -> nn.Embedding: + """ + Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly + initialized vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_embeddings (`torch.nn.Embedding`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the input tokens + `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. 
For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + + Return: + `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if + `new_num_tokens` is `None` + """ + + if pad_to_multiple_of is not None: + if not isinstance(pad_to_multiple_of, int): + raise ValueError( + f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not and integer. Please make sure to pass an integer" + ) + if new_num_tokens is None: + new_num_tokens = old_embeddings.weight.shape[0] + new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of + else: + logger.info( + "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding" + f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available." + " For more details about this, or help on choosing the correct value for resizing, refer to this guide:" + " https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc" + ) + + if new_num_tokens is None: + return old_embeddings + + if is_deepspeed_zero3_enabled(): + import deepspeed + + with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + else: + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + + if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled(): + return old_embeddings + + if not isinstance(old_embeddings, nn.Embedding): + raise TypeError( + f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You" + " should either use a different resize function or make sure that `old_embeddings` are an instance of" + f" {nn.Embedding}." + ) + + # Build new embeddings + + # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init + # because the shape of the new embedding layer is used across various modeling files + # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading + # to errors when training. + new_embeddings = nn.Embedding( + new_num_tokens, + old_embedding_dim, + device=old_embeddings.weight.device, + dtype=old_embeddings.weight.dtype, + ) + + # initialize all new embeddings (in particular added tokens) + self._init_weights(new_embeddings) + + # Copy token embeddings from the previous weights + + # numbers of tokens to copy + n = min(old_num_tokens, new_num_tokens) + + if is_deepspeed_zero3_enabled(): + import deepspeed + + params = [old_embeddings.weight, new_embeddings.weight] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + else: + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + + return new_embeddings + + def _get_resized_lm_head( + self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False + ) -> nn.Linear: + """ + Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized + vectors at the end. 
Reducing the size will remove vectors from the end.
+
+        Args:
+            old_lm_head (`torch.nn.Linear`):
+                Old lm head linear layer to be resized.
+            new_num_tokens (`int`, *optional*):
+                New number of tokens in the linear matrix.
+
+                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
+                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
+                `torch.nn.Linear` module of the model without doing anything.
+            transposed (`bool`, *optional*, defaults to `False`):
+                Whether `old_lm_head` is transposed or not. If `True`, `old_lm_head.size()` is `lm_head_dim,
+                vocab_size`, else `vocab_size, lm_head_dim`.
+
+        Return:
+            `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is
+            `None`
+        """
+        if new_num_tokens is None:
+            return old_lm_head
+
+        if is_deepspeed_zero3_enabled():
+            import deepspeed
+
+            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
+                old_num_tokens, old_lm_head_dim = (
+                    old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
+                )
+        else:
+            old_num_tokens, old_lm_head_dim = (
+                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
+            )
+
+        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
+            return old_lm_head
+
+        if not isinstance(old_lm_head, nn.Linear):
+            raise TypeError(
+                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You"
+                " should either use a different resize function or make sure that `old_lm_head` is an instance of"
+                f" {nn.Linear}."
+            )
+
+        # Build new lm head
+        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
+        has_new_lm_head_bias = old_lm_head.bias is not None
+
+        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
+        # because the shape of the new embedding layer is used across various modeling files
+        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
+        # to errors when training.
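+        # Illustrative shapes for the resize below (a hypothetical example, assuming transposed=False,
+        # vocab_size=32000, lm_head_dim=4096, and new_num_tokens=32064): old_lm_head.weight is
+        # (32000, 4096); nn.Linear(4096, 32064) allocates a (32064, 4096) weight, and
+        # _copy_lm_head_original_to_resized later copies over the first 32000 rows.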
+ new_lm_head = nn.Linear( + *new_lm_head_shape, + bias=has_new_lm_head_bias, + device=old_lm_head.weight.device, + dtype=old_lm_head.weight.dtype, + ) + + # initialize new lm head (in particular added tokens) + self._init_weights(new_lm_head) + + num_tokens_to_copy = min(old_num_tokens, new_num_tokens) + + if is_deepspeed_zero3_enabled(): + import deepspeed + + params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + else: + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + + return new_lm_head + + def _copy_lm_head_original_to_resized( + self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ): + # Copy old lm head weights to new lm head + if not transposed: + new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] + else: + new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] + + # Copy bias weights to new lm head + if has_new_lm_head_bias: + new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] + + def resize_position_embeddings(self, new_num_position_embeddings: int): + raise NotImplementedError( + f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " + f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" + ) + + def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: + raise NotImplementedError( + f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " + f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" + ) + + def init_weights(self): + """ + If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any + initialization logic in `_init_weights`. + """ + # Prune heads if needed + if self.config.pruned_heads: + self.prune_heads(self.config.pruned_heads) + + if _init_weights: + # Initialize weights + self.apply(self._initialize_weights) + + # Tie weights should be skipped when not initializing all weights + # since from_pretrained(...) calls tie weights anyways + self.tie_weights() + + def prune_heads(self, heads_to_prune: Dict[int, List[int]]): + """ + Prunes heads of the base model. + + Arguments: + heads_to_prune (`Dict[int, List[int]]`): + Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads + to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on + layer 1 and heads 2 and 3 on layer 2. + """ + # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads + for layer, heads in heads_to_prune.items(): + union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) + self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON + + self.base_model._prune_heads(heads_to_prune) + + def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): + """ + Activates gradient checkpointing for the current model. 
+
+        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
+        activations".
+
+        We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of
+        the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
+
+        Args:
+            gradient_checkpointing_kwargs (dict, *optional*):
+                Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function.
+        """
+        if not self.supports_gradient_checkpointing:
+            raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
+
+        if gradient_checkpointing_kwargs is None:
+            gradient_checkpointing_kwargs = {}
+
+        gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs)
+
+        # For the old GC format (transformers < 4.35.0) for models that live on the Hub
+        # we will fall back to the overwritten `_set_gradient_checkpointing` method
+        _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters
+
+        if not _is_using_old_format:
+            self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)
+        else:
+            self.apply(partial(self._set_gradient_checkpointing, value=True))
+            logger.warning(
+                "You are using an old version of the checkpointing format that is deprecated (we will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)."
+                " Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
+            )
+
+        if getattr(self, "_hf_peft_config_loaded", False):
+            # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True
+            # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334
+            # When training with PEFT, only LoRA layers will have requires_grad set to True, but the output of frozen
+            # layers needs to propagate the gradients to make sure the gradient flows.
+            self.enable_input_require_grads()
+
+    def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint):
+        is_gradient_checkpointing_set = False
+
+        # Apply it on the top-level module in case the top-level module supports it,
+        # for example, LongT5Stack inherits from `PreTrainedModel`.
+        if hasattr(self, "gradient_checkpointing"):
+            self._gradient_checkpointing_func = gradient_checkpointing_func
+            self.gradient_checkpointing = enable
+            is_gradient_checkpointing_set = True
+
+        for module in self.modules():
+            if hasattr(module, "gradient_checkpointing"):
+                module._gradient_checkpointing_func = gradient_checkpointing_func
+                module.gradient_checkpointing = enable
+                is_gradient_checkpointing_set = True
+
+        if not is_gradient_checkpointing_set:
+            raise ValueError(
+                f"{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure the architecture supports it by setting a boolean attribute"
+                " `gradient_checkpointing` on the modules of the model that use checkpointing."
+            )
+
+    def gradient_checkpointing_disable(self):
+        """
+        Deactivates gradient checkpointing for the current model.
+
+        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
+        activations".
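+
+        A minimal usage sketch (assuming `model` is a `PreTrainedModel` whose architecture supports checkpointing):
+
+        ```python
+        model.gradient_checkpointing_enable()
+        assert model.is_gradient_checkpointing
+        model.gradient_checkpointing_disable()
+        assert not model.is_gradient_checkpointing
+        ```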
+        """
+        if self.supports_gradient_checkpointing:
+            # For the old GC format (transformers < 4.35.0) for models that live on the Hub
+            # we will fall back to the overwritten `_set_gradient_checkpointing` method
+            _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters
+            if not _is_using_old_format:
+                self._set_gradient_checkpointing(enable=False)
+            else:
+                logger.warning(
+                    "You are using an old version of the checkpointing format that is deprecated (we will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)."
+                    " Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
+                )
+                self.apply(partial(self._set_gradient_checkpointing, value=False))
+
+        if getattr(self, "_hf_peft_config_loaded", False):
+            self.disable_input_require_grads()
+
+    @property
+    def is_gradient_checkpointing(self) -> bool:
+        """
+        Whether gradient checkpointing is activated for this model or not.
+
+        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
+        activations".
+        """
+        return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
+
+    def save_pretrained(
+        self,
+        save_directory: Union[str, os.PathLike],
+        is_main_process: bool = True,
+        state_dict: Optional[dict] = None,
+        save_function: Callable = torch.save,
+        push_to_hub: bool = False,
+        max_shard_size: Union[int, str] = "5GB",
+        safe_serialization: bool = True,
+        variant: Optional[str] = None,
+        token: Optional[Union[str, bool]] = None,
+        save_peft_format: bool = True,
+        **kwargs,
+    ):
+        """
+        Save a model and its configuration file to a directory, so that it can be re-loaded using the
+        [`~PreTrainedModel.from_pretrained`] class method.
+
+        Arguments:
+            save_directory (`str` or `os.PathLike`):
+                Directory to which to save. Will be created if it doesn't exist.
+            is_main_process (`bool`, *optional*, defaults to `True`):
+                Whether the process calling this is the main process or not. Useful during distributed training (e.g.
+                on TPUs), when this function needs to be called on all processes. In this case, set
+                `is_main_process=True` only on the main process to avoid race conditions.
+            state_dict (nested dictionary of `torch.Tensor`):
+                The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only
+                save parts of the model or if special precautions need to be taken when recovering the state dictionary
+                of a model (like when using model parallelism).
+            save_function (`Callable`):
+                The function to use to save the state dictionary. Useful during distributed training (e.g. on TPUs),
+                when one needs to replace `torch.save` by another method.
+            push_to_hub (`bool`, *optional*, defaults to `False`):
+                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+                namespace).
+            max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`):
+                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
+                lower than this threshold. If expressed as a string, it needs to be digits followed by a unit (like
+                `"5MB"`). We default it to 5GB in order for models to be able to run easily on free-tier Google Colab
+                instances without CPU OOM issues.
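+                For example (illustrative numbers only), a ~12GB model saved with `max_shard_size="5GB"` would be
+                split into three shard files, plus a JSON index mapping each weight name to its shard.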
+
+
+
+                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
+                which will be bigger than `max_shard_size`.
+
+
+
+            safe_serialization (`bool`, *optional*, defaults to `True`):
+                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+            variant (`str`, *optional*):
+                If specified, weights are saved in the format pytorch_model.<variant>.bin.
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+            save_peft_format (`bool`, *optional*, defaults to `True`):
+                For backward compatibility with the PEFT library, in case adapter weights are attached to the model,
+                all keys of the state dict of adapters need to be prepended with `base_model.model`. Advanced users can
+                disable this behaviour by setting `save_peft_format` to `False`.
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+        """
+        use_auth_token = kwargs.pop("use_auth_token", None)
+
+        if use_auth_token is not None:
+            warnings.warn(
+                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+                FutureWarning,
+            )
+            if token is not None:
+                raise ValueError(
+                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+                )
+            token = use_auth_token
+
+        if token is not None:
+            kwargs["token"] = token
+
+        _hf_peft_config_loaded = getattr(self, "_hf_peft_config_loaded", False)
+
+        # Checks if the model has been loaded in 8-bit
+        if (
+            getattr(self, "is_loaded_in_8bit", False)
+            and not getattr(self, "is_8bit_serializable", False)
+            and not _hf_peft_config_loaded
+        ):
+            raise ValueError(
+                "You are calling `save_pretrained` on an 8-bit converted model; you will likely encounter unexpected"
+                " behaviors. If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed."
+            )
+
+        # If the model has adapters attached, you can save the adapters
+        if getattr(self, "is_loaded_in_4bit", False) and not _hf_peft_config_loaded:
+            raise NotImplementedError(
+                "You are calling `save_pretrained` on a 4-bit converted model. This is currently not supported"
+            )
+
+        if getattr(self, "_awq_is_fused", False):
+            raise ValueError("You cannot save an AWQ model that uses fused modules!")
+
+        if "save_config" in kwargs:
+            warnings.warn(
+                "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead."
+            )
+            is_main_process = kwargs.pop("save_config")
+        if safe_serialization and not is_safetensors_available():
+            raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.")
+
+        if os.path.isfile(save_directory):
+            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+            return
+
+        os.makedirs(save_directory, exist_ok=True)
+
+        if push_to_hub:
+            commit_message = kwargs.pop("commit_message", None)
+            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+            repo_id = self._create_repo(repo_id, **kwargs)
+            files_timestamps = self._get_files_timestamps(save_directory)
+
+        # Only save the model itself if we are using distributed training
+        model_to_save = unwrap_model(self)
+
+        # save the string version of dtype to the config, e.g.
convert torch.float32 => "float32" + # we currently don't use this setting automatically, but may start to use with v5 + dtype = get_parameter_dtype(model_to_save) + model_to_save.config.torch_dtype = str(dtype).split(".")[1] + + # Attach architecture to the config + model_to_save.config.architectures = [model_to_save.__class__.__name__] + + # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be + # loaded from the Hub. + if self._auto_class is not None: + custom_object_save(self, save_directory, config=self.config) + + # Save the config + if is_main_process: + if not _hf_peft_config_loaded: + model_to_save.config.save_pretrained(save_directory) + if self.can_generate(): + model_to_save.generation_config.save_pretrained(save_directory) + + if _hf_peft_config_loaded: + logger.info( + "Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved." + ) + state_dict = model_to_save.get_adapter_state_dict() + + if save_peft_format: + logger.info( + "To match the expected format of the PEFT library, all keys of the state dict of adapters will be pre-pended with `base_model.model`." + ) + peft_state_dict = {} + for key, value in state_dict.items(): + peft_state_dict[f"base_model.model.{key}"] = value + state_dict = peft_state_dict + + active_adapter = self.active_adapters() + + if len(active_adapter) > 1: + raise ValueError( + "Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one " + "by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`" + ) + active_adapter = active_adapter[0] + + current_peft_config = self.peft_config[active_adapter] + current_peft_config.save_pretrained(save_directory) + + # Save the model + if state_dict is None: + state_dict = model_to_save.state_dict() + + # Translate state_dict from smp to hf if saving with smp >= 1.10 + if IS_SAGEMAKER_MP_POST_1_10: + for smp_to_hf, _ in smp.state.module_manager.translate_functions: + state_dict = smp_to_hf(state_dict) + + # Handle the case where some state_dict keys shouldn't be saved + if self._keys_to_ignore_on_save is not None: + for ignore_key in self._keys_to_ignore_on_save: + if ignore_key in state_dict.keys(): + del state_dict[ignore_key] + if safe_serialization: + # Safetensors does not allow tensor aliasing. + # We're going to remove aliases before saving + ptrs = collections.defaultdict(list) + for name, tensor in state_dict.items(): + # Sometimes in the state_dict we have non-tensor objects. + # e.g. in bitsandbytes we have some `str` objects in the state_dict + if isinstance(tensor, torch.Tensor): + ptrs[id_tensor_storage(tensor)].append(name) + else: + # In the non-tensor case, fall back to the pointer of the object itself + ptrs[id(tensor)].append(name) + + # These are all the pointers of shared tensors. + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + warn_names = set() + for names in shared_ptrs.values(): + # Removing the keys which are declared as known duplicates on + # load. This allows to make sure the name which is kept is consistent. 
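+                # e.g. with a hypothetical `_tied_weights_keys = ["lm_head.weight"]`, the tied alias
+                # "lm_head.weight" would be dropped from the state_dict below, keeping only the canonical
+                # embedding tensor it shares storage with; `tie_weights()` re-links them on reload.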
+                if self._tied_weights_keys is not None:
+                    found = 0
+                    for name in sorted(names):
+                        matches_pattern = any(re.search(pat, name) for pat in self._tied_weights_keys)
+                        if matches_pattern and name in state_dict:
+                            found += 1
+                            if found < len(names):
+                                del state_dict[name]
+
+                # When not all duplicates have been cleaned, still remove those keys, but put a clear warning.
+                # If the link between tensors was done at runtime then `from_pretrained` will not get
+                # the key back leading to a random tensor. A proper warning will be shown
+                # during reload (if applicable), but since the file is not necessarily compatible with
+                # the config, better show a proper warning.
+                found = 0
+                for name in names:
+                    if name in state_dict:
+                        found += 1
+                        if found > 1:
+                            del state_dict[name]
+                            warn_names.add(name)
+            if len(warn_names) > 0:
+                logger.warning_once(
+                    f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
+                )
+
+        # Shard the model if it is too big.
+        if not _hf_peft_config_loaded:
+            weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
+            weights_name = _add_variant(weights_name, variant)
+        else:
+            weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME
+
+        shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
+
+        # Clean the folder from a previous save
+        for filename in os.listdir(save_directory):
+            full_filename = os.path.join(save_directory, filename)
+            # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
+            # in distributed settings to avoid race conditions.
+            weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
+
+            # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
+            filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "")
+            reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
+
+            if (
+                filename.startswith(weights_no_suffix)
+                and os.path.isfile(full_filename)
+                and filename not in shards.keys()
+                and is_main_process
+                and reg.fullmatch(filename_no_suffix) is not None
+            ):
+                os.remove(full_filename)
+
+        # Save the model
+        for shard_file, shard in shards.items():
+            if safe_serialization:
+                # At some point we will need to deal better with save_function (used for TPU and other distributed
+                # joyfulness), but for now this is enough.
+                safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"})
+            else:
+                save_function(shard, os.path.join(save_directory, shard_file))
+
+        if index is None:
+            path_to_weights = os.path.join(save_directory, _add_variant(WEIGHTS_NAME, variant))
+            logger.info(f"Model weights saved in {path_to_weights}")
+        else:
+            save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
+            save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant))
+            # Save the index as well
+            with open(save_index_file, "w", encoding="utf-8") as f:
+                content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+                f.write(content)
+            logger.info(
+                f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
+                f"split in {len(shards)} checkpoint shards. You can find where each parameter has been saved in the "
+                f"index located at {save_index_file}."
+            )
+
+        if push_to_hub:
+            self._upload_modified_files(
+                save_directory,
+                repo_id,
+                files_timestamps,
+                commit_message=commit_message,
+                token=token,
+            )
+
+    def get_memory_footprint(self, return_buffers=True):
+        r"""
+        Get the memory footprint of a model. This will return the memory footprint of the current model in bytes.
+        Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the
+        PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2
+
+        Arguments:
+            return_buffers (`bool`, *optional*, defaults to `True`):
+                Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers
+                are tensors that do not require gradients and are not registered as parameters, e.g. the mean and std
+                in batch norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
+        """
+        mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
+        if return_buffers:
+            mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
+            mem = mem + mem_bufs
+        return mem
+
+    @wraps(torch.nn.Module.cuda)
+    def cuda(self, *args, **kwargs):
+        # Checks if the model has been loaded in 8-bit
+        if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
+            raise ValueError(
+                "Calling `cuda()` is not supported for `4-bit` or `8-bit` quantized models. Please use the model as it is, since the"
+                " model has already been set to the correct devices and cast to the correct `dtype`."
+            )
+        else:
+            return super().cuda(*args, **kwargs)
+
+    @wraps(torch.nn.Module.to)
+    def to(self, *args, **kwargs):
+        # Checks if the model has been loaded in 8-bit
+        if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
+            raise ValueError(
+                "`.to` is not supported for `4-bit` or `8-bit` bitsandbytes models. Please use the model as it is, since the"
+                " model has already been set to the correct devices and cast to the correct `dtype`."
+            )
+        elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ:
+            # For GPTQ models, we prevent users from casting the model to another dtype to restrict unwanted behaviours.
+            # The correct API should be to load the model with the desired dtype directly through `from_pretrained`.
+            dtype_present_in_args = False
+
+            if "dtype" not in kwargs:
+                for arg in args:
+                    if isinstance(arg, torch.dtype):
+                        dtype_present_in_args = True
+                        break
+            else:
+                dtype_present_in_args = True
+
+            if dtype_present_in_args:
+                raise ValueError(
+                    "You cannot cast a GPTQ model to a new `dtype`. Make sure to load the model using `from_pretrained` with the desired"
+                    " `dtype`, by passing the correct `torch_dtype` argument."
+                )
+        return super().to(*args, **kwargs)
+
+    def half(self, *args):
+        # Checks if the model is quantized
+        if getattr(self, "is_quantized", False):
+            raise ValueError(
+                "`.half()` is not supported for quantized models. Please use the model as it is, since the"
+                " model has already been cast to the correct `dtype`."
+            )
+        else:
+            return super().half(*args)
+
+    def float(self, *args):
+        # Checks if the model is quantized
+        if getattr(self, "is_quantized", False):
+            raise ValueError(
+                "`.float()` is not supported for quantized models. Please use the model as it is, since the"
+                " model has already been cast to the correct `dtype`."
+ ) + else: + return super().float(*args) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], + *model_args, + config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + ignore_mismatched_sizes: bool = False, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + use_safetensors: bool = None, + **kwargs, + ): + r""" + Instantiate a pretrained pytorch model from a pre-trained model configuration. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In + this case, `from_tf` should be set to `True` and a configuration object should be provided as + `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a + PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g, + `./flax_model/` containing `flax_model.msgpack`). In this case, `from_flax` should be set to + `True`. + - `None` if you are both providing the configuration and state dictionary (resp. with keyword + arguments `config` and `state_dict`). + model_args (sequence of positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): + Can be either: + + - an instance of a class derived from [`PretrainedConfig`], + - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. + + Configuration for the model to use instead of an automatically loaded configuration. Configuration can + be automatically loaded when: + + - The model is a model provided by the library (loaded with the *model id* string of a pretrained + model). + - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the + save directory. + - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a + configuration JSON file named *config.json* is found in the directory. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + A state dictionary to use instead of a state dictionary loaded from saved weights file. 
+
+                This option can be used if you want to create a model from a pretrained configuration but load your own
+                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
+                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
+            cache_dir (`Union[str, os.PathLike]`, *optional*):
+                Path to a directory in which a downloaded pretrained model configuration should be cached if the
+                standard cache should not be used.
+            from_tf (`bool`, *optional*, defaults to `False`):
+                Load the model weights from a TensorFlow checkpoint save file (see docstring of
+                `pretrained_model_name_or_path` argument).
+            from_flax (`bool`, *optional*, defaults to `False`):
+                Load the model weights from a Flax checkpoint save file (see docstring of
+                `pretrained_model_name_or_path` argument).
+            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
+                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
+                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
+                checkpoint with 3 labels).
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+                cached versions if they exist.
+            resume_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+                file exists.
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            output_loading_info(`bool`, *optional*, defaults to `False`):
+                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
+            local_files_only(`bool`, *optional*, defaults to `False`):
+                Whether or not to only look at local files (i.e., do not try to download the model).
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+                identifier allowed by git.
+
+
+
+                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
+
+
+
+            mirror (`str`, *optional*):
+                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
+                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
+                Please refer to the mirror site for more information.
+            _fast_init(`bool`, *optional*, defaults to `True`):
+                Whether or not to disable fast initialization.
+
+
+
+                One should only disable *_fast_init* to ensure backwards compatibility with `transformers.__version__ <
+                4.6.0` for seeded model initialization. This argument will be removed at the next major version. See
+                [pull request 11471](https://github.com/huggingface/transformers/pull/11471) for more information.
+
+
+
+            > Parameters for big model inference
+
+            low_cpu_mem_usage(`bool`, *optional*):
+                Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
This is an experimental feature and subject to change at any moment.
+            torch_dtype (`str` or `torch.dtype`, *optional*):
+                Override the default `torch.dtype` and load the model under a specific `dtype`. The different options
+                are:
+
+                1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified
+                `dtype`, ignoring the model's `config.torch_dtype` if one exists. If not specified,
+                the model will be loaded in `torch.float` (fp32).
+
+                2. `"auto"` - A `torch_dtype` entry in the `config.json` file of the model will be
+                attempted to be used. If this entry isn't found, the `dtype` of the first floating-point weight in
+                the checkpoint is checked and used as `dtype`. This will load the model using the `dtype` it was
+                saved in at the end of training. It can't be used as an indicator of how the model was trained,
+                since it could have been trained in one of the half-precision dtypes but saved in fp32.
+
+
+
+                For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or
+                reach out to the authors and ask them to add this information to the model's card and to insert the
+                `torch_dtype` entry in `config.json` on the hub.
+
+
+
+            device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
+                A map that specifies where each submodule should go. It doesn't need to be refined to each
+                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
+                same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
+                like `1`) on which the model will be allocated, the device map will map the entire model to this
+                device. Passing `device_map = 0` means put the whole model on GPU 0.
+
+                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
+                more information about each option see [designing a device
+                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
+            max_memory (`Dict`, *optional*):
+                A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available
+                for each GPU and the available CPU RAM if unset.
+            offload_folder (`str` or `os.PathLike`, *optional*):
+                If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+            offload_state_dict (`bool`, *optional*):
+                If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU
+                RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to
+                `True` when there is some disk offload.
+            load_in_8bit (`bool`, *optional*, defaults to `False`):
+                If `True`, will convert the loaded model into a mixed 8-bit quantized model. To use this feature please
+                install `bitsandbytes` (`pip install -U bitsandbytes`).
+            load_in_4bit (`bool`, *optional*, defaults to `False`):
+                If `True`, will convert the loaded model into a 4-bit precision quantized model. To use this feature
+                install the latest version of `bitsandbytes` (`pip install -U bitsandbytes`).
+            quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*):
+                A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g.
+                bitsandbytes, gptq)
+            subfolder (`str`, *optional*, defaults to `""`):
+                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+                specify the folder name here.
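+                For example, a hypothetical repo that keeps its checkpoint under a `weights/` folder would be
+                loaded with `subfolder="weights"`.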
+            variant (`str`, *optional*):
+                If specified, load weights from the `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant`
+                is ignored when using `from_tf` or `from_flax`.
+            use_safetensors (`bool`, *optional*, defaults to `None`):
+                Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors`
+                is not installed, it will be set to `False`.
+
+            kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initialize the model
+                (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
+                automatically loaded:
+
+                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
+                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
+                      already been done)
+                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
+                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
+                      corresponds to a configuration attribute will be used to override said attribute with the
+                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
+                      will be passed to the underlying model's `__init__` function.
+
+
+
+        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
+        use this method in a firewalled environment.
+
+
+
+        Examples:
+
+        ```python
+        >>> from transformers import BertConfig, BertModel
+
+        >>> # Download model and configuration from huggingface.co and cache.
+        >>> model = BertModel.from_pretrained("bert-base-uncased")
+        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
+        >>> model = BertModel.from_pretrained("./test/saved_model/")
+        >>> # Update configuration during loading.
+        >>> model = BertModel.from_pretrained("bert-base-uncased", output_attentions=True)
+        >>> assert model.config.output_attentions == True
+        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
+        >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json")
+        >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config)
+        >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
+        >>> model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)
+        ```
+
+        * `low_cpu_mem_usage` algorithm:
+
+        This is an experimental function that loads the model using ~1x model size CPU memory
+
+        Here is how it works:
+
+        1. save which state_dict keys we have
+        2. drop the state_dict before the model is created, since the latter takes 1x model size CPU memory
+        3. after the model has been instantiated, switch to the meta device all params/buffers that
+        are going to be replaced from the loaded state_dict
+        4. load the state_dict a 2nd time
+        5.
replace the params/buffers from the state_dict + + Currently, it can't handle deepspeed ZeRO stage 3 and ignores loading errors + + """ + state_dict = kwargs.pop("state_dict", None) + from_tf = kwargs.pop("from_tf", False) + from_flax = kwargs.pop("from_flax", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + output_loading_info = kwargs.pop("output_loading_info", False) + use_auth_token = kwargs.pop("use_auth_token", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + _ = kwargs.pop("mirror", None) + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + _fast_init = kwargs.pop("_fast_init", True) + torch_dtype = kwargs.pop("torch_dtype", None) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", None) + device_map = kwargs.pop("device_map", None) + max_memory = kwargs.pop("max_memory", None) + offload_folder = kwargs.pop("offload_folder", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + load_in_8bit = kwargs.pop("load_in_8bit", False) + load_in_4bit = kwargs.pop("load_in_4bit", False) + quantization_config = kwargs.pop("quantization_config", None) + subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) + variant = kwargs.pop("variant", None) + adapter_kwargs = kwargs.pop("adapter_kwargs", {}) + adapter_name = kwargs.pop("adapter_name", "default") + use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) + + if is_fsdp_enabled(): + low_cpu_mem_usage = True + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if token is not None and adapter_kwargs is not None and "token" not in adapter_kwargs: + adapter_kwargs["token"] = token + + if use_safetensors is None and not is_safetensors_available(): + use_safetensors = False + + if is_bitsandbytes_available(): + is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse("0.37.2") + else: + is_8bit_serializable = False + + if trust_remote_code is True: + logger.warning( + "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" + " ignored." 
+ ) + + if commit_hash is None: + if not isinstance(config, PretrainedConfig): + # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible + resolved_config_file = cached_file( + pretrained_model_name_or_path, + CONFIG_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + else: + commit_hash = getattr(config, "_commit_hash", None) + + if is_peft_available(): + _adapter_model_path = adapter_kwargs.pop("_adapter_model_path", None) + + if _adapter_model_path is None: + _adapter_model_path = find_adapter_config_file( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + _commit_hash=commit_hash, + **adapter_kwargs, + ) + if _adapter_model_path is not None and os.path.isfile(_adapter_model_path): + with open(_adapter_model_path, "r", encoding="utf-8") as f: + _adapter_model_path = pretrained_model_name_or_path + pretrained_model_name_or_path = json.load(f)["base_model_name_or_path"] + else: + _adapter_model_path = None + + # change device_map into a map if we passed an int, a str or a torch.device + if isinstance(device_map, torch.device): + device_map = {"": device_map} + elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + try: + device_map = {"": torch.device(device_map)} + except RuntimeError: + raise ValueError( + "When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or " + f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}." + ) + elif isinstance(device_map, int): + if device_map < 0: + raise ValueError( + "You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' " + ) + else: + device_map = {"": device_map} + + if device_map is not None: + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + elif not low_cpu_mem_usage: + raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`") + + if low_cpu_mem_usage: + if device_map is not None: + # The max memory utils require PyTorch >= 1.10 to have torch.cuda.mem_get_info. + require_version_core("torch>=1.10") + + if is_deepspeed_zero3_enabled(): + raise ValueError( + "DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`." 
+ ) + elif not is_accelerate_available(): + raise ImportError( + "Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install accelerate`" + ) + + quantization_method_from_args = None + + if quantization_config is not None: + quantization_method_from_args = getattr( + quantization_config, "quant_method", QuantizationMethod.BITS_AND_BYTES + ) + + if quantization_config is None and (load_in_8bit or load_in_4bit): + quantization_method_from_args = QuantizationMethod.BITS_AND_BYTES + quantization_config, kwargs = BitsAndBytesConfig.from_dict( + config_dict={"load_in_8bit": load_in_8bit, "load_in_4bit": load_in_4bit}, + return_unused_kwargs=True, + **kwargs, + ) + elif quantization_method_from_args == QuantizationMethod.BITS_AND_BYTES: + load_in_8bit = quantization_config.load_in_8bit + load_in_4bit = quantization_config.load_in_4bit + + quantization_config_kwargs = { + k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters + } + + if len(quantization_config_kwargs) > 0: + raise ValueError( + "You can't pass `load_in_8bit` or any other `BitsAndBytesConfig` argument as a kwarg when passing " + "`quantization_config` argument at the same time." + ) + + if load_in_8bit or load_in_4bit: + if not torch.cuda.is_available(): + raise RuntimeError("No GPU found. A GPU is needed for quantization.") + if not (is_accelerate_available() and is_bitsandbytes_available()): + raise ImportError( + "Using `load_in_8bit=True` requires Accelerate: `pip install accelerate` and the latest version of" + " bitsandbytes `pip install -i https://test.pypi.org/simple/ bitsandbytes` or" + " `pip install bitsandbytes`." + ) + + if torch_dtype is None: + # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` + logger.info( + f"Overriding torch_dtype={torch_dtype} with `torch_dtype=torch.float16` due to " + "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " + "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" + " torch_dtype=torch.float16 to remove this warning." + ) + torch_dtype = torch.float16 + + if device_map is None: + device_map = {"": torch.cuda.current_device()} + logger.info( + "The device_map was not initialized. " + "Setting device_map to {'':torch.cuda.current_device()}. " + "If you want to use the model for inference, please set device_map ='auto' " + ) + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + + if from_tf or from_flax: + raise ValueError( + "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" + " sure the weights are in PyTorch format." 
+ ) + + user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Load config if we don't provide a configuration + if not isinstance(config, PretrainedConfig): + config_path = config if config is not None else pretrained_model_name_or_path + config, model_kwargs = cls.config_class.from_pretrained( + config_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + **kwargs, + ) + else: + # In case one passes a config to `from_pretrained` + "attn_implementation" + # override the `_attn_implementation` attribute to `attn_implementation` of the kwargs + # Please see: https://github.com/huggingface/transformers/issues/28038 + + # Overwrite `config._attn_implementation` by the one from the kwargs --> in auto-factory + # we pop attn_implementation from the kwargs but this handles the case where users + # passes manually the config to `from_pretrained`. + config = copy.deepcopy(config) + + kwarg_attn_imp = kwargs.pop("attn_implementation", None) + if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp: + config._attn_implementation = kwarg_attn_imp + model_kwargs = kwargs + + quantizer = None + quantization_method_from_config = None + if hasattr(config, "quantization_config"): + quantization_method_from_config = config.quantization_config.get( + "quant_method", QuantizationMethod.BITS_AND_BYTES + ) + + if ( + quantization_method_from_args is not None + and quantization_method_from_args == QuantizationMethod.AWQ + and quantization_method_from_config is None + ): + raise ValueError( + "You cannot quantize with AWQ a non-quantized model using transformers, please refer to the quantization documentation" + " to read more about how to quantize models with AWQ algorithm https://huggingface.co/docs/transformers/main_classes/quantization" + ) + + if quantization_method_from_config is not None and quantization_method_from_args is not None: + if quantization_method_from_config != quantization_method_from_args: + raise ValueError( + f"The model is already quantized with {quantization_method_from_config}. " + f"You can't quantize it again with {quantization_method_from_args}" + ) + + if ( + quantization_method_from_config in (QuantizationMethod.GPTQ, QuantizationMethod.AWQ) + and quantization_method_from_args is not None + ): + loading_attr_dict = quantization_config.get_loading_attributes() + for attr, val in loading_attr_dict.items(): + config.quantization_config[attr] = val + quantization_method_from_args = None + logger.warning( + f"You passed `quantization_config` to `from_pretrained` but the model you're loading already has a " + f"`quantization_config` attribute and has already quantized weights. However, loading attributes" + f" (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." 
+            )
+        if (
+            quantization_method_from_args == QuantizationMethod.GPTQ
+            or quantization_method_from_config == QuantizationMethod.GPTQ
+        ):
+            gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
+            if not gptq_supports_cpu and not torch.cuda.is_available():
+                raise RuntimeError("GPU is required to quantize or run a quantized model.")
+            elif not (is_optimum_available() and is_auto_gptq_available()):
+                raise ImportError(
+                    "Loading a GPTQ quantized model requires the optimum library (`pip install optimum`) and the auto-gptq library (`pip install auto-gptq`)"
+                )
+            elif version.parse(importlib.metadata.version("auto_gptq")) < version.parse("0.4.2"):
+                raise ImportError(
+                    "You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq`"
+                )
+            else:
+                # Need to protect the import
+                from optimum.gptq import GPTQQuantizer
+            if quantization_method_from_config == QuantizationMethod.GPTQ:
+                quantization_config = GPTQConfig.from_dict(config.quantization_config)
+                config.quantization_config = quantization_config
+            if torch_dtype is None:
+                torch_dtype = torch.float16
+            else:
+                logger.info("We suggest setting `torch_dtype=torch.float16` for better efficiency with GPTQ.")
+            quantizer = GPTQQuantizer.from_dict(quantization_config.to_dict_optimum())
+        elif quantization_method_from_config == QuantizationMethod.AWQ:
+            if not torch.cuda.is_available():
+                raise RuntimeError("GPU is required to run an AWQ quantized model.")
+
+            if not is_auto_awq_available():
+                raise ImportError("Loading an AWQ quantized model requires the auto-awq library (`pip install autoawq`)")
+
+            if not is_accelerate_available():
+                raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)")
+
+            if device_map is None:
+                logger.warning(
+                    "You have loaded an AWQ model without a device_map; make sure to set "
+                    "your model on a GPU device in order to run it."
+                )
+            elif device_map is not None:
+                if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
+                    raise ValueError(
+                        "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device."
+                        " This is not supported. Please remove the CPU or disk device from the device_map."
+                    )
+
+            if torch_dtype is None:
+                torch_dtype = torch.float16
+            else:
+                logger.info("We suggest setting `torch_dtype=torch.float16` for better efficiency with AWQ.")
+
+            # Force-set to `True` for more mem efficiency
+            if low_cpu_mem_usage is None:
+                low_cpu_mem_usage = True
+
+        if (
+            is_8bit_serializable
+            and quantization_method_from_args == QuantizationMethod.BITS_AND_BYTES
+            and load_in_8bit
+        ):
+            if quantization_method_from_config == QuantizationMethod.BITS_AND_BYTES:
+                logger.warning(
+                    "You passed `quantization_config` to `from_pretrained` but the model you're loading already has a"
+                    " `quantization_config` attribute. The `quantization_config` attribute will be overwritten with the"
+                    " one you passed to `from_pretrained`."
+ ) + config.quantization_config = quantization_config + elif ( + is_8bit_serializable + and not load_in_8bit + and quantization_method_from_config == QuantizationMethod.BITS_AND_BYTES + ): + quantization_config = config.quantization_config + if isinstance(quantization_config, dict): + quantization_config = BitsAndBytesConfig.from_dict(quantization_config, return_unused_kwargs=False) + elif isinstance(quantization_config, BitsAndBytesConfig): + pass + else: + raise ValueError( + f"Invalid type for `quantization_config`: {type(quantization_config)}. Should be a `dict` or a" + " `BitsAndBytesConfig` instance." + ) + + load_in_8bit = quantization_config.load_in_8bit + + if load_in_8bit: + if torch_dtype is None: + torch_dtype = torch.float16 + if device_map is None: + if torch.cuda.is_available(): + device_map = {"": torch.cuda.current_device()} + else: + raise RuntimeError("No GPU found. A GPU is needed for quantization.") + logger.info( + "The device_map was not initialized. " + "Setting device_map to {'':torch.cuda.current_device()}. " + "If you want to use the model for inference, please set device_map ='auto' " + ) + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + + elif ( + not is_8bit_serializable + and not load_in_8bit + and quantization_method_from_config == QuantizationMethod.BITS_AND_BYTES + ): + logger.warning( + "Detected the presence of a `quantization_config` attribute in the model's configuration but you don't have the correct" + " `bitsandbytes` version to support int8 serialization. Please install the latest version of `bitsandbytes` with " + " `pip install --upgrade bitsandbytes`." + ) + + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the + # index of the files. + is_sharded = False + sharded_metadata = None + # Load model + loading_info = None + + # Keep in fp32 modules + keep_in_fp32_modules = None + use_keep_in_fp32_modules = False + + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + if from_tf and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + ): + # Load from a TF 1.0 checkpoint in priority if from_tf + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + elif from_tf and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) + ): + # Load from a TF 2.0 checkpoint in priority if from_tf + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) + elif from_flax and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + ): + # Load from a Flax checkpoint in priority if from_flax + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + elif use_safetensors is not False and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant) + ) + elif use_safetensors is not False and os.path.isfile( + os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + ) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, 
subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + ) + is_sharded = True + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)) + ): + # Load from a PyTorch checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant) + ) + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)) + ): + # Load from a sharded PyTorch checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant) + ) + is_sharded = True + # At this stage we don't have a weight file so we will raise an error. + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + ) or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use" + " `from_tf=True` to load this model from those weights." + ) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True`" + " to load this model from those weights." + ) + elif use_safetensors: + raise EnvironmentError( + f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path}." + ) + else: + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}," + f" {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory" + f" {pretrained_model_name_or_path}." + ) + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): + archive_file = pretrained_model_name_or_path + is_local = True + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")): + if not from_tf: + raise ValueError( + f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " + "from_tf to True to load from this checkpoint." 
+ ) + archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") + is_local = True + elif is_remote_url(pretrained_model_name_or_path): + filename = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) + else: + # set correct filename + if from_tf: + filename = TF2_WEIGHTS_NAME + elif from_flax: + filename = FLAX_WEIGHTS_NAME + elif use_safetensors is not False: + filename = _add_variant(SAFE_WEIGHTS_NAME, variant) + else: + filename = _add_variant(WEIGHTS_NAME, variant) + + try: + # Load from URL or cache if already cached + cached_file_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "resume_download": resume_download, + "local_files_only": local_files_only, + "token": token, + "user_agent": user_agent, + "revision": revision, + "subfolder": subfolder, + "_raise_exceptions_for_missing_entries": False, + "_commit_hash": commit_hash, + } + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) + + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None + # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant): + # Maybe the checkpoint is sharded, we try to grab the index name in this case. + resolved_archive_file = cached_file( + pretrained_model_name_or_path, + _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant), + **cached_file_kwargs, + ) + if resolved_archive_file is not None: + is_sharded = True + elif use_safetensors: + if revision == "main": + resolved_archive_file, revision, is_sharded = auto_conversion( + pretrained_model_name_or_path, **cached_file_kwargs + ) + cached_file_kwargs["revision"] = revision + if resolved_archive_file is None: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} " + "and thus cannot be loaded with `safetensors`. Please make sure that the model has " + "been saved with `safe_serialization=True` or do not set `use_safetensors=True`." + ) + else: + # This repo has no safetensors file of any kind, we switch to PyTorch. + filename = _add_variant(WEIGHTS_NAME, variant) + resolved_archive_file = cached_file( + pretrained_model_name_or_path, filename, **cached_file_kwargs + ) + if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant): + # Maybe the checkpoint is sharded, we try to grab the index name in this case. + resolved_archive_file = cached_file( + pretrained_model_name_or_path, + _add_variant(WEIGHTS_INDEX_NAME, variant), + **cached_file_kwargs, + ) + if resolved_archive_file is not None: + is_sharded = True + if resolved_archive_file is None: + # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error + # message. + has_file_kwargs = { + "revision": revision, + "proxies": proxies, + "token": token, + } + if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights." + " Use `from_tf=True` to load this model from those weights." 
+ )
+ elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs):
+ raise EnvironmentError(
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
+ f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use"
+ " `from_flax=True` to load this model from those weights."
+ )
+ elif variant is not None and has_file(
+ pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs
+ ):
+ raise EnvironmentError(
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
+ f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant"
+ f" {variant}. Use `variant=None` to load this model from those weights."
+ )
+ else:
+ raise EnvironmentError(
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
+ f" {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or"
+ f" {FLAX_WEIGHTS_NAME}."
+ )
+ except EnvironmentError:
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
+ # to the original exception.
+ raise
+ except Exception as e:
+ # For any other exception, we throw a generic error.
+ raise EnvironmentError(
+ f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
+ f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)},"
+ f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}."
+ ) from e
+
+ if is_local:
+ logger.info(f"loading weights file {archive_file}")
+ resolved_archive_file = archive_file
+ else:
+ logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
+ else:
+ resolved_archive_file = None
+
+ # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
+ if is_sharded:
+ # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
+ resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
+ pretrained_model_name_or_path,
+ resolved_archive_file,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ local_files_only=local_files_only,
+ token=token,
+ user_agent=user_agent,
+ revision=revision,
+ subfolder=subfolder,
+ _commit_hash=commit_hash,
+ )
+
+ if (
+ is_safetensors_available()
+ and isinstance(resolved_archive_file, str)
+ and resolved_archive_file.endswith(".safetensors")
+ ):
+ with safe_open(resolved_archive_file, framework="pt") as f:
+ metadata = f.metadata()
+
+ if metadata.get("format") == "pt":
+ pass
+ elif metadata.get("format") == "tf":
+ from_tf = True
+ logger.info("A TensorFlow safetensors file is being loaded in a PyTorch model.")
+ elif metadata.get("format") == "flax":
+ from_flax = True
+ logger.info("A Flax safetensors file is being loaded in a PyTorch model.")
+ else:
+ raise ValueError(
+ f"Incompatible safetensors file. File metadata is not ['pt', 'tf', 'flax'] but {metadata.get('format')}"
+ )
+
+ from_pt = not (from_tf | from_flax)
+
+ # load pt weights early so that we know which dtype to init the model under
+ if from_pt:
+ if not is_sharded and state_dict is None:
+ # Time to load the checkpoint
+ state_dict = load_state_dict(resolved_archive_file)
+
+ # set dtype to instantiate the model under:
+ # 1. If torch_dtype is not None, we use that dtype
+ # 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first
+ # weights entry that is of a floating type - we assume all floating dtype weights are of the same dtype
+ # we also may have config.torch_dtype available, but we won't rely on it till v5
+ dtype_orig = None
+
+ if torch_dtype is not None:
+ if isinstance(torch_dtype, str):
+ if torch_dtype == "auto":
+ if hasattr(config, "torch_dtype") and config.torch_dtype is not None:
+ torch_dtype = config.torch_dtype
+ logger.info(f"Will use torch_dtype={torch_dtype} as defined in model's config object")
+ else:
+ if is_sharded and "dtype" in sharded_metadata:
+ torch_dtype = sharded_metadata["dtype"]
+ elif not is_sharded:
+ torch_dtype = get_state_dict_dtype(state_dict)
+ else:
+ one_state_dict = load_state_dict(resolved_archive_file[0])
+ torch_dtype = get_state_dict_dtype(one_state_dict)
+ del one_state_dict # free CPU memory
+ logger.info(
+ "Since the `torch_dtype` attribute can't be found in model's config object, "
+ f"will use torch_dtype={torch_dtype} as derived from model's weights"
+ )
+ else:
+ raise ValueError(
+ f'`torch_dtype` can be either `torch.dtype` or `"auto"`, but received {torch_dtype}'
+ )
+ dtype_orig = cls._set_default_torch_dtype(torch_dtype)
+
+ # Check if `_keep_in_fp32_modules` is not None
+ use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and (
+ torch_dtype == torch.float16 or load_in_4bit or load_in_8bit
+ )
+
+ if is_sharded:
+ loaded_state_dict_keys = sharded_metadata["all_checkpoint_keys"]
+ else:
+ loaded_state_dict_keys = list(state_dict.keys())
+ if low_cpu_mem_usage or (use_keep_in_fp32_modules and is_accelerate_available()):
+ # In case some weights need to be kept in float32 and accelerate is not installed,
+ # we later on want to take the path where state_dict is not None, that is the one
+ # that does not require accelerate.
+ state_dict = None
+
+ config.name_or_path = pretrained_model_name_or_path
+
+ # Instantiate model.
+ init_contexts = [no_init_weights(_enable=_fast_init)]
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
+ init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts
+ elif load_in_8bit or load_in_4bit or low_cpu_mem_usage:
+ init_contexts.append(init_empty_weights())
+
+ config = copy.deepcopy(config) # We do not want to modify the config inplace in from_pretrained.
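+ # Illustrative sketch (comments only, not executed here): with `low_cpu_mem_usage=True`, the
+ # `init_empty_weights()` context appended above means the model skeleton created below lives on
+ # the "meta" device, so no real storage is allocated until the checkpoint weights are loaded:
+ # with init_empty_weights():
+ # m = SomeModel(config) # hypothetical model class
+ # next(m.parameters()).device # -> device(type="meta")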
+ config = cls._autoset_attn_implementation(
+ config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch_dtype, device_map=device_map
+ )
+
+ with ContextManagers(init_contexts):
+ # Let's make sure we don't run the init function of buffer modules
+ model = cls(config, *model_args, **model_kwargs)
+
+ # make sure we use the model's config since the __init__ call might have copied it
+ config = model.config
+
+ # Check first if we are `from_pt`
+ if use_keep_in_fp32_modules:
+ if is_accelerate_available():
+ low_cpu_mem_usage = True
+ keep_in_fp32_modules = model._keep_in_fp32_modules
+ else:
+ keep_in_fp32_modules = []
+
+ if load_in_8bit or load_in_4bit:
+ from .integrations import get_keys_to_not_convert, replace_with_bnb_linear
+
+ llm_int8_skip_modules = quantization_config.llm_int8_skip_modules
+ load_in_8bit_fp32_cpu_offload = quantization_config.llm_int8_enable_fp32_cpu_offload
+ if load_in_8bit:
+ logger.info("Detected 8-bit loading: activating 8-bit loading for this model")
+ else:
+ logger.info("Detected 4-bit loading: activating 4-bit loading for this model")
+
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
+ if llm_int8_skip_modules is None:
+ modules_to_not_convert = get_keys_to_not_convert(model)
+ else:
+ modules_to_not_convert = llm_int8_skip_modules
+
+ if not isinstance(modules_to_not_convert, list):
+ modules_to_not_convert = [modules_to_not_convert]
+
+ modules_to_not_convert.extend(keep_in_fp32_modules)
+
+ # Extend the modules to not convert to keys that are supposed to be offloaded to `cpu` or `disk`
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+ keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+ if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
+ raise ValueError(
+ "If you want to offload some keys to `cpu` or `disk`, you need to set "
+ "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
+ "converted to 8-bit but kept in 32-bit."
+ )
+
+ modules_to_not_convert.extend(keys_on_cpu)
+
+ supports_4bit = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.39.0")
+
+ if load_in_4bit and not supports_4bit:
+ raise ValueError(
+ "You have a version of `bitsandbytes` that is not compatible with 4bit inference and training."
+ " Make sure you have the latest version of `bitsandbytes` installed."
+ )
+
+ model = replace_with_bnb_linear(
+ model, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config
+ )
+ # training in 8-bit is only available in 0.37.0+
+ model._is_quantized_training_enabled = version.parse(
+ importlib.metadata.version("bitsandbytes")
+ ) >= version.parse("0.37.0")
+
+ config.quantization_config = quantization_config
+ model.is_8bit_serializable = is_8bit_serializable
+
+ if load_in_8bit and torch_dtype is None:
+ logger.warning(
+ "You are loading your model in 8bit but you did not specify a `torch_dtype` attribute. "
+ "All non-linear modules will be loaded in full precision."
+ " If you want to load the other modules in a different precision, please specify a `torch_dtype` attribute."
+ ) + if quantization_method_from_config == QuantizationMethod.GPTQ: + model = quantizer.convert_model(model) + model._is_quantized_training_enabled = True + elif quantization_method_from_config == QuantizationMethod.AWQ: + from .integrations import fuse_awq_modules, get_keys_to_not_convert, replace_with_awq_linear + + modules_to_not_convert = get_keys_to_not_convert(model) + + if quantization_config is None: + quantization_config = AwqConfig.from_dict(config.quantization_config) + + model, has_been_replaced = replace_with_awq_linear( + model, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert + ) + model._is_quantized_training_enabled = False + + if not has_been_replaced: + logger.warning( + "You are loading an AWQ model but no linear modules were found in your model." + " Please double check your model architecture, or submit an issue on github if you think this is" + " a bug." + ) + + if quantization_method_from_config is not None: + model.quantization_method = quantization_method_from_config + elif quantization_method_from_args is not None: + model.quantization_method = quantization_method_from_args + if hasattr(model, "quantization_method"): + model.is_quantized = True + + # We store the original dtype for quantized models as we cannot easily retrieve it + # once the weights have been quantized + # Note that once you have loaded a quantized model, you can't change its dtype so this will + # remain a single source of truth + config._pre_quantization_dtype = torch_dtype + + if isinstance(device_map, str): + special_dtypes = {} + if load_in_8bit or load_in_4bit: + special_dtypes.update( + { + name: torch_dtype + for name, _ in model.named_parameters() + if any(m in name for m in modules_to_not_convert) + } + ) + + special_dtypes.update( + { + name: torch.float32 + for name, _ in model.named_parameters() + if any(m in name for m in keep_in_fp32_modules) + } + ) + + target_dtype = torch_dtype + + if load_in_4bit: + if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): + from accelerate.utils import CustomDtype + + target_dtype = CustomDtype.INT4 + else: + raise ValueError( + "You are using `device_map='auto'` on a 4bit loaded version of the model. To automatically compute" + " the appropriate device map, you should upgrade your `accelerate` library, " + "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map " + "calculation. You may encounter unexpected behavior, or pass your own device map" + ) + elif load_in_8bit: + target_dtype = torch.int8 + + no_split_modules = model._get_no_split_modules(device_map) + if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " + "'sequential'." + ) + + device_map_kwargs = {"no_split_module_classes": no_split_modules} + if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters: + device_map_kwargs["special_dtypes"] = special_dtypes + elif len(special_dtypes) > 0: + logger.warning( + "This model has some weights that should be kept in higher precision, you need to upgrade " + "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)." 
+ ) + if device_map != "sequential": + max_memory = get_balanced_memory( + model, + dtype=target_dtype, + low_zero=(device_map == "balanced_low_0"), + max_memory=max_memory, + **device_map_kwargs, + ) + else: + max_memory = get_max_memory(max_memory) + if getattr(model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: + # need more space for buffers that are created during quantization + max_memory = {key: val * 0.90 for key, val in max_memory.items()} + device_map_kwargs["max_memory"] = max_memory + + # Make sure tied weights are tied before creating the device map. + model.tie_weights() + device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) + + if load_in_8bit or load_in_4bit: + # The LM head / tied weights or any last module can stay on disk / CPU + device_map_without_lm_head = { + key: device_map[key] for key in device_map.keys() if key not in modules_to_not_convert + } + if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): + raise ValueError( + """ + Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit + the quantized model. If you want to dispatch the model on the CPU or the disk while keeping + these modules in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom + `device_map` to `from_pretrained`. Check + https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu + for more details. + """ + ) + del device_map_without_lm_head + + elif device_map is not None: + model.tie_weights() + tied_params = find_tied_parameters(model) + # check if we don't have tied param in different devices + check_tied_parameters_on_same_device(tied_params, device_map) + + if from_tf: + if resolved_archive_file.endswith(".index"): + # Load from a TensorFlow 1.X checkpoint - provided by original authors + model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' + else: + # Load from our TensorFlow 2.0 checkpoints + try: + from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model + + model, loading_info = load_tf2_checkpoint_in_pytorch_model( + model, resolved_archive_file, allow_missing_keys=True, output_loading_info=True + ) + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed." + " Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation" + " instructions." + ) + raise + elif from_flax: + try: + from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model + + model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) + except ImportError: + logger.error( + "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for" + " installation instructions." + ) + raise + elif from_pt: + # restore default dtype + if dtype_orig is not None: + torch.set_default_dtype(dtype_orig) + ( + model, + missing_keys, + unexpected_keys, + mismatched_keys, + offload_index, + error_msgs, + ) = cls._load_pretrained_model( + model, + state_dict, + loaded_state_dict_keys, # XXX: rename? 
+ resolved_archive_file,
+ pretrained_model_name_or_path,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ sharded_metadata=sharded_metadata,
+ _fast_init=_fast_init,
+ low_cpu_mem_usage=low_cpu_mem_usage,
+ device_map=device_map,
+ offload_folder=offload_folder,
+ offload_state_dict=offload_state_dict,
+ dtype=torch_dtype,
+ is_quantized=(getattr(model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES),
+ keep_in_fp32_modules=keep_in_fp32_modules,
+ )
+
+ model.is_loaded_in_4bit = load_in_4bit
+ model.is_loaded_in_8bit = load_in_8bit
+
+ # make sure token embedding weights are still tied if needed
+ model.tie_weights()
+
+ # Set model in evaluation mode to deactivate DropOut modules by default
+ model.eval()
+
+ # If it is a model with generation capabilities, attempt to load the generation config
+ if model.can_generate() and pretrained_model_name_or_path is not None:
+ try:
+ model.generation_config = GenerationConfig.from_pretrained(
+ pretrained_model_name_or_path,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ local_files_only=local_files_only,
+ token=token,
+ revision=revision,
+ subfolder=subfolder,
+ _from_auto=from_auto_class,
+ _from_pipeline=from_pipeline,
+ **kwargs,
+ )
+ except OSError:
+ logger.info(
+ "Generation config file not found, using a generation config created from the model config."
+ )
+ pass
+
+ if (
+ quantization_config is not None
+ and quantization_config.quant_method == QuantizationMethod.AWQ
+ and quantization_config.do_fuse
+ ):
+ model = fuse_awq_modules(model, config.quantization_config)
+ model._awq_is_fused = True
+
+ # Dispatch model with hooks on all devices if necessary
+ if device_map is not None:
+ device_map_kwargs = {
+ "device_map": device_map,
+ "offload_dir": offload_folder,
+ "offload_index": offload_index,
+ }
+ if "skip_keys" in inspect.signature(dispatch_model).parameters:
+ device_map_kwargs["skip_keys"] = model._skip_keys_device_placement
+ dispatch_model(model, **device_map_kwargs)
+
+ if quantization_method_from_args == QuantizationMethod.GPTQ:
+ if quantization_config.tokenizer is None:
+ quantization_config.tokenizer = pretrained_model_name_or_path
+ if cls.main_input_name != "input_ids":
+ raise RuntimeError("We can only quantize pure text models.")
+ quantizer.quantize_model(model, quantization_config.tokenizer)
+ config.quantization_config = GPTQConfig.from_dict_optimum(quantizer.to_dict())
+ model._is_quantized_training_enabled = True
+ if quantization_method_from_config == QuantizationMethod.GPTQ:
+ model = quantizer.post_init_model(model)
+
+ if _adapter_model_path is not None:
+ model.load_adapter(
+ _adapter_model_path,
+ adapter_name=adapter_name,
+ token=token,
+ adapter_kwargs=adapter_kwargs,
+ )
+
+ if output_loading_info:
+ if loading_info is None:
+ loading_info = {
+ "missing_keys": missing_keys,
+ "unexpected_keys": unexpected_keys,
+ "mismatched_keys": mismatched_keys,
+ "error_msgs": error_msgs,
+ }
+ return model, loading_info
+
+ return model
+
+ @classmethod
+ def _load_pretrained_model(
+ cls,
+ model,
+ state_dict,
+ loaded_keys,
+ resolved_archive_file,
+ pretrained_model_name_or_path,
+ ignore_mismatched_sizes=False,
+ sharded_metadata=None,
+ _fast_init=True,
+ low_cpu_mem_usage=False,
+ device_map=None,
+ offload_folder=None,
+ offload_state_dict=None,
+ dtype=None,
+ is_quantized=False,
+ keep_in_fp32_modules=None,
+ ):
+ is_safetensors = False
+ if is_quantized:
+ from .integrations import 
set_module_quantized_tensor_to_device + + if device_map is not None and "disk" in device_map.values(): + archive_file = ( + resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file + ) + is_safetensors = archive_file.endswith(".safetensors") + if offload_folder is None and not is_safetensors: + raise ValueError( + "The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`" + " for them. Alternatively, make sure you have `safetensors` installed if the model you are using" + " offers the weights in this format." + ) + if offload_folder is not None: + os.makedirs(offload_folder, exist_ok=True) + if offload_state_dict is None: + offload_state_dict = True + + is_sharded_safetensors = is_safetensors and sharded_metadata is not None + + # tie the model weights before retrieving the state_dict + model.tie_weights() + + # Retrieve missing & unexpected_keys + model_state_dict = model.state_dict() + expected_keys = list(model_state_dict.keys()) + prefix = model.base_model_prefix + + def _fix_key(key): + if "beta" in key: + return key.replace("beta", "bias") + if "gamma" in key: + return key.replace("gamma", "weight") + return key + + original_loaded_keys = loaded_keys + loaded_keys = [_fix_key(key) for key in loaded_keys] + + if len(prefix) > 0: + has_prefix_module = any(s.startswith(prefix) for s in loaded_keys) + expects_prefix_module = any(s.startswith(prefix) for s in expected_keys) + else: + has_prefix_module = False + expects_prefix_module = False + + # key re-naming operations are never done on the keys + # that are loaded, but always on the keys of the newly initialized model + remove_prefix_from_model = not has_prefix_module and expects_prefix_module + add_prefix_to_model = has_prefix_module and not expects_prefix_module + + if remove_prefix_from_model: + _prefix = f"{prefix}." + expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)] + expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys] + elif add_prefix_to_model: + expected_keys = [".".join([prefix, s]) for s in expected_keys] + + missing_keys = list(set(expected_keys) - set(loaded_keys)) + unexpected_keys = set(loaded_keys) - set(expected_keys) + # Remove nonpersistent buffers from unexpected keys: they are not in the state dict but will be in the model + # buffers + model_buffers = {n for n, _ in model.named_buffers()} + if remove_prefix_from_model: + model_buffers = {key[len(_prefix) :] if key.startswith(_prefix) else key for key in model_buffers} + elif add_prefix_to_model: + model_buffers = {".".join([prefix, key]) for key in model_buffers} + unexpected_keys = list(unexpected_keys - model_buffers) + + model.tie_weights() + if device_map is None and not is_fsdp_enabled(): + ptrs = collections.defaultdict(list) + for name, tensor in model.state_dict().items(): + id_tensor = id_tensor_storage(tensor) + ptrs[id_tensor].append(name) + + # These are all the pointers of shared tensors. 
+ tied_params = [names for _, names in ptrs.items() if len(names) > 1]
+ else:
+ # id function doesn't work for meta tensor so we need this function
+ tied_params = find_tied_parameters(model)
+
+ for group in tied_params:
+ if remove_prefix_from_model:
+ group = [key[len(_prefix) :] if key.startswith(_prefix) else key for key in group]
+ elif add_prefix_to_model:
+ group = [".".join([prefix, key]) for key in group]
+ missing_in_group = [k for k in missing_keys if k in group]
+ if len(missing_in_group) > 0 and len(missing_in_group) < len(group):
+ missing_keys = [k for k in missing_keys if k not in missing_in_group]
+
+ # Some models may have keys that are not in the state by design, removing them before needlessly warning
+ # the user.
+ if cls._keys_to_ignore_on_load_missing is not None:
+ for pat in cls._keys_to_ignore_on_load_missing:
+ missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
+
+ if cls._keys_to_ignore_on_load_unexpected is not None:
+ for pat in cls._keys_to_ignore_on_load_unexpected:
+ unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
+
+ # retrieve weights on meta device and put them back on CPU.
+ # This is not ideal in terms of memory, but if we don't do that now, we can't initialize them in the next step
+ if low_cpu_mem_usage:
+ for key in missing_keys:
+ if key in list(model_state_dict.keys()):
+ key = key
+ elif f"{prefix}.{key}" in list(model_state_dict.keys()):
+ key = f"{prefix}.{key}"
+ elif key.startswith(prefix) and ".".join(key.split(".")[1:]) in list(model_state_dict.keys()):
+ key = ".".join(key.split(".")[1:])
+ param = model_state_dict[key]
+
+ # upcast in fp32 if any
+ target_dtype = dtype
+ if (
+ keep_in_fp32_modules is not None
+ and dtype == torch.float16
+ and any(
+ module_to_keep_in_fp32 in key.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules
+ )
+ ):
+ target_dtype = torch.float32
+
+ if param.device == torch.device("meta"):
+ if not (is_quantized):
+ set_module_tensor_to_device(model, key, "cpu", torch.empty(*param.size(), dtype=target_dtype))
+ else:
+ set_module_quantized_tensor_to_device(
+ model, key, "cpu", torch.empty(*param.size(), dtype=target_dtype)
+ )
+
+ # retrieve uninitialized modules and initialize before maybe overriding that with the pretrained weights.
+ if _fast_init:
+ if remove_prefix_from_model:
+ _loaded_keys = [f"{prefix}.{k}" for k in loaded_keys]
+ elif add_prefix_to_model:
+ _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys]
+ else:
+ _loaded_keys = loaded_keys
+ set_initialized_submodules(model, _loaded_keys)
+ # This will only initialize submodules that are not marked as initialized by the line above.
+ model.apply(model._initialize_weights)
+
+ # Set some modules to fp32 if any
+ if keep_in_fp32_modules is not None:
+ for name, param in model.named_parameters():
+ if any(module_to_keep_in_fp32 in name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules):
+ # `param = param.to(torch.float32)` does not work here, as it only rebinds the name in the local scope.
+ param.data = param.data.to(torch.float32)
+
+ # Make sure we are able to load base models as well as derived models (with heads)
+ start_prefix = ""
+ model_to_load = model
+ if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module:
+ start_prefix = cls.base_model_prefix + "."
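+ # e.g. loading a checkpoint whose keys start with "bert." (saved from a model with a head) into a
+ # bare BertModel: `start_prefix` strips the "bert." prefix from the checkpoint keys; the converse
+ # case (a bare-base checkpoint loaded into a model with a head) is handled just below by loading
+ # directly into the base submodule.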
+ if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module: + model_to_load = getattr(model, cls.base_model_prefix) + base_model_expected_keys = list(model_to_load.state_dict().keys()) + if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys): + raise ValueError( + "The state dictionary of the model you are trying to load is corrupted. Are you sure it was " + "properly saved?" + ) + if device_map is not None: + device_map = {k.replace(f"{cls.base_model_prefix}.", ""): v for k, v in device_map.items()} + + def _find_mismatched_keys( + state_dict, + model_state_dict, + loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ): + mismatched_keys = [] + if ignore_mismatched_sizes: + for checkpoint_key in loaded_keys: + # If the checkpoint is sharded, we may not have the key here. + if checkpoint_key not in state_dict: + continue + model_key = checkpoint_key + if remove_prefix_from_model: + # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it. + model_key = f"{prefix}.{checkpoint_key}" + elif add_prefix_to_model: + # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it. + model_key = ".".join(checkpoint_key.split(".")[1:]) + + if ( + model_key in model_state_dict + and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape + ): + mismatched_keys.append( + (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) + ) + del state_dict[checkpoint_key] + return mismatched_keys + + if resolved_archive_file is not None: + folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1]) + else: + folder = None + if device_map is not None and is_safetensors: + param_device_map = expand_device_map(device_map, original_loaded_keys, start_prefix) + str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32" + if sharded_metadata is None: + archive_file = ( + resolved_archive_file[0] + if isinstance(resolved_archive_file, (list, tuple)) + else resolved_archive_file + ) + weight_map = {p: archive_file for p in original_loaded_keys} + else: + weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata["weight_map"].items()} + offload_index = { + p[len(start_prefix) :]: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype} + for p, f in weight_map.items() + if p.startswith(start_prefix) and param_device_map[p[len(start_prefix) :]] == "disk" + } + + if state_dict is not None: + # Whole checkpoint + mismatched_keys = _find_mismatched_keys( + state_dict, + model_state_dict, + original_loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ) + error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix) + offload_index = None + else: + # Sharded checkpoint or whole but low_cpu_mem_usage==True + + # This should always be a list but, just to be sure. 
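+ # (e.g. a single, non-sharded file loaded with `low_cpu_mem_usage=True` reaches this branch as a plain str)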
+ if not isinstance(resolved_archive_file, list):
+ resolved_archive_file = [resolved_archive_file]
+
+ error_msgs = []
+ mismatched_keys = []
+ if not is_safetensors:
+ offload_index = {} if device_map is not None and "disk" in device_map.values() else None
+ if offload_state_dict:
+ state_dict_folder = tempfile.mkdtemp()
+ state_dict_index = {}
+ else:
+ state_dict_folder = None
+ state_dict_index = None
+
+ if is_sharded_safetensors:
+ disk_only_shard_files = get_disk_only_shard_files(
+ device_map, sharded_metadata=sharded_metadata, start_prefix=start_prefix
+ )
+ disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files]
+ else:
+ disk_only_shard_files = []
+
+ if len(resolved_archive_file) > 1:
+ resolved_archive_file = logging.tqdm(resolved_archive_file, desc="Loading checkpoint shards")
+ for shard_file in resolved_archive_file:
+ # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload.
+ if shard_file in disk_only_shard_files:
+ continue
+ state_dict = load_state_dict(shard_file)
+
+ # Mismatched keys contains tuples (key, shape1, shape2) of weights in the checkpoint that have a shape not
+ # matching the weights in the model.
+ mismatched_keys += _find_mismatched_keys(
+ state_dict,
+ model_state_dict,
+ original_loaded_keys,
+ add_prefix_to_model,
+ remove_prefix_from_model,
+ ignore_mismatched_sizes,
+ )
+ if low_cpu_mem_usage:
+ if is_fsdp_enabled() and not is_local_dist_rank_0():
+ for key, param in model_to_load.state_dict().items():
+ if param.device == torch.device("meta"):
+ if not (is_quantized):
+ set_module_tensor_to_device(
+ model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype)
+ )
+ else:
+ set_module_quantized_tensor_to_device(
+ model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype)
+ )
+ else:
+ new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(
+ model_to_load,
+ state_dict,
+ loaded_keys,
+ start_prefix,
+ expected_keys,
+ device_map=device_map,
+ offload_folder=offload_folder,
+ offload_index=offload_index,
+ state_dict_folder=state_dict_folder,
+ state_dict_index=state_dict_index,
+ dtype=dtype,
+ is_quantized=is_quantized,
+ is_safetensors=is_safetensors,
+ keep_in_fp32_modules=keep_in_fp32_modules,
+ )
+ error_msgs += new_error_msgs
+ else:
+ error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix)
+
+ # force memory release
+ del state_dict
+ gc.collect()
+
+ if offload_index is not None and len(offload_index) > 0:
+ if model != model_to_load:
+ # We need to add the prefix of the base model
+ prefix = cls.base_model_prefix
+ if not is_safetensors:
+ for weight_name in offload_index:
+ shutil.move(
+ os.path.join(offload_folder, f"{weight_name}.dat"),
+ os.path.join(offload_folder, f"{prefix}.{weight_name}.dat"),
+ )
+ offload_index = {f"{prefix}.{key}": value for key, value in offload_index.items()}
+ if not is_safetensors:
+ save_offload_index(offload_index, offload_folder)
+ offload_index = None
+
+ if offload_state_dict:
+ # Load back temporarily offloaded state dict
+ load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder)
+ shutil.rmtree(state_dict_folder)
+
+ if len(error_msgs) > 0:
+ error_msg = "\n\t".join(error_msgs)
+ if "size mismatch" in error_msg:
+ error_msg += (
+ "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method."
+ )
+ raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
+
+ if is_quantized:
+ unexpected_keys = [elem for elem in unexpected_keys if "SCB" not in elem]
+ missing_keys = [elem for elem in missing_keys if "SCB" not in elem]
+
+ if len(unexpected_keys) > 0:
+ archs = [] if model.config.architectures is None else model.config.architectures
+ warner = logger.warning if model.__class__.__name__ in archs else logger.info
+ warner(
+ f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
+ f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
+ f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
+ " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
+ " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
+ f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
+ " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
+ )
+ else:
+ logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
+ if len(missing_keys) > 0:
+ logger.warning(
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
+ f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
+ " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
+ )
+ elif len(mismatched_keys) == 0:
+ logger.info(
+ f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
+ f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
+ f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
+ " training."
+ )
+ if len(mismatched_keys) > 0:
+ mismatched_warning = "\n".join(
+ [
+ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
+ for key, shape1, shape2 in mismatched_keys
+ ]
+ )
+ logger.warning(
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
+ f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
+ f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
+ " to use it for predictions and inference."
+ )
+
+ return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs
+
+ def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
+ module_keys = {".".join(key.split(".")[:-1]) for key in names}
+
+ # torch.nn.ParameterList is a special case where two parameter keywords
+ # are appended to the module name, *e.g.* bert.special_embeddings.0
+ module_keys = module_keys.union(
+ {".".join(key.split(".")[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()}
+ )
+
+ retrieved_modules = []
+ # retrieve all modules that have at least one missing weight name
+ for name, module in self.named_modules():
+ if remove_prefix:
+ _prefix = f"{self.base_model_prefix}."
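+ # e.g. turns "bert.pooler.dense" into "pooler.dense" so it can match keys saved without the base-model prefix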
+ name = name[len(_prefix) :] if name.startswith(_prefix) else name
+ elif add_prefix:
+ name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix
+
+ if name in module_keys:
+ retrieved_modules.append(module)
+
+ return retrieved_modules
+
+ @staticmethod
+ def _load_pretrained_model_low_mem(model, loaded_state_dict_keys, resolved_archive_file, start_prefix=""):
+ """
+ This is an experimental function that loads the model using ~1.x model size CPU memory.
+
+ Before you call it, do:
+
+ 1. save which state_dict keys are available
+ 2. drop the state_dict before the model is created, since the latter takes 1x model size memory
+
+ Here we then continue:
+
+ 3. switch all params/buffers that are going to be replaced from the loaded state_dict to the meta device
+ 4. load the state_dict a 2nd time
+ 5. replace the params/buffers from the state_dict
+
+ Currently, it doesn't handle missing_keys, unexpected_keys, mismatched_keys. It can't handle DeepSpeed.
+ """
+
+ _move_model_to_meta(model, loaded_state_dict_keys, start_prefix)
+ state_dict = load_state_dict(resolved_archive_file)
+ error_msgs = _load_state_dict_into_meta_model(model, state_dict, loaded_state_dict_keys, start_prefix)
+ return error_msgs
+
+ @classmethod
+ def register_for_auto_class(cls, auto_class="AutoModel"):
+ """
+ Register this class with a given auto class. This should only be used for custom models as the ones in the
+ library are already mapped with an auto class.
+
+
+
+ This API is experimental and may have some slight breaking changes in the next releases.
+
+
+
+ Args:
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`):
+ The auto class to register this new model with.
+ """
+ if not isinstance(auto_class, str):
+ auto_class = auto_class.__name__
+
+ import transformers.models.auto as auto_module
+
+ if not hasattr(auto_module, auto_class):
+ raise ValueError(f"{auto_class} is not a valid auto class.")
+
+ cls._auto_class = auto_class
+
+ def to_bettertransformer(self) -> "PreTrainedModel":
+ """
+ Converts the model to use [PyTorch's native attention
+ implementation](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html), integrated into
+ Transformers through the [Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). Only a
+ subset of all Transformers models are supported.
+
+ PyTorch's attention fastpath allows speeding up inference through kernel fusions and the use of [nested
+ tensors](https://pytorch.org/docs/stable/nested.html). Detailed benchmarks can be found in [this blog
+ post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2).
+
+ Returns:
+ [`PreTrainedModel`]: The model converted to BetterTransformer.
+ """
+ if not is_optimum_available():
+ raise ImportError("The package `optimum` is required to use Better Transformer.")
+
+ from optimum.version import __version__ as optimum_version
+
+ if version.parse(optimum_version) < version.parse("1.7.0"):
+ raise ImportError(
+ f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found."
+ )
+
+ from optimum.bettertransformer import BetterTransformer
+
+ return BetterTransformer.transform(self)
+
+ def reverse_bettertransformer(self):
+ """
+ Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is
+ used, for example in order to save the model.
+ + Returns: + [`PreTrainedModel`]: The model converted back to the original modeling. + """ + if not is_optimum_available(): + raise ImportError("The package `optimum` is required to use Better Transformer.") + + from optimum.version import __version__ as optimum_version + + if version.parse(optimum_version) < version.parse("1.7.0"): + raise ImportError( + f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." + ) + + from optimum.bettertransformer import BetterTransformer + + return BetterTransformer.reverse(self) + + def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): + """ + Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given. + """ + + # Skip the check during tracing. + if is_torch_fx_proxy(input_ids) or torch.jit.is_tracing() or is_torchdynamo_compiling(): + return + + if (attention_mask is not None) or (self.config.pad_token_id is None): + return + + # Check only the first and last input IDs to reduce overhead. + if self.config.pad_token_id in input_ids[:, [-1, 0]]: + warn_string = ( + "We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See " + "https://huggingface.co/docs/transformers/troubleshooting" + "#incorrect-output-when-padding-tokens-arent-masked." + ) + + # If the pad token is equal to either BOS, EOS, or SEP, we do not know whether the user should use an + # attention_mask or not. In this case, we should still show a warning because this is a rare case. + if ( + (self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id) + or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id) + or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id) + ): + warn_string += ( + f"\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical " + f"to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), " + f"or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded." + ) + + logger.warning_once(warn_string) + + +PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) +if PreTrainedModel.push_to_hub.__doc__ is not None: + PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( + object="model", object_class="AutoModel", object_files="model file" + ) + + +class PoolerStartLogits(nn.Module): + """ + Compute SQuAD start logits from sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, 1) + + def forward( + self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + + Returns: + `torch.FloatTensor`: The start logits for SQuAD. 
+ """ + x = self.dense(hidden_states).squeeze(-1) + + if p_mask is not None: + if get_parameter_dtype(self) == torch.float16: + x = x * (1 - p_mask) - 65500 * p_mask + else: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerEndLogits(nn.Module): + """ + Compute SQuAD end logits from sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` + to use. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dense_1 = nn.Linear(config.hidden_size, 1) + + def forward( + self, + hidden_states: torch.FloatTensor, + start_states: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + p_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + The hidden states of the first tokens for the labeled span. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + The position of the first token for the labeled span. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + + + + One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides + `start_states`. + + + + Returns: + `torch.FloatTensor`: The end logits for SQuAD. + """ + assert ( + start_states is not None or start_positions is not None + ), "One of start_states, start_positions should be not None" + if start_positions is not None: + slen, hsz = hidden_states.shape[-2:] + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) + start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) + + x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) + x = self.activation(x) + x = self.LayerNorm(x) + x = self.dense_1(x).squeeze(-1) + + if p_mask is not None: + if get_parameter_dtype(self) == torch.float16: + x = x * (1 - p_mask) - 65500 * p_mask + else: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerAnswerClass(nn.Module): + """ + Compute SQuAD 2.0 answer class from classification and start tokens hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model. + """ + + def __init__(self, config): + super().__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) + + def forward( + self, + hidden_states: torch.FloatTensor, + start_states: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + cls_index: Optional[torch.LongTensor] = None, + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. 
+ start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + The hidden states of the first tokens for the labeled span. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + The position of the first token for the labeled span. + cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Position of the CLS token for each sentence in the batch. If `None`, takes the last token. + + + + One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides + `start_states`. + + + + Returns: + `torch.FloatTensor`: The SQuAD 2.0 answer class. + """ + # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. + hsz = hidden_states.shape[-1] + assert ( + start_states is not None or start_positions is not None + ), "One of start_states, start_positions should be not None" + if start_positions is not None: + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) + + if cls_index is not None: + cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) + else: + cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) + + x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) + x = self.activation(x) + x = self.dense_1(x).squeeze(-1) + + return x + + +@dataclass +class SquadHeadOutput(ModelOutput): + """ + Base class for outputs of question answering models using a [`~modeling_utils.SQuADHead`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): + Classification loss as the sum of start token, end token (and is_impossible if provided) classification + losses. + start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the top config.start_n_top start token possibilities (beam-search). + start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Indices for the top config.start_n_top start token possibilities (beam-search). + end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities + (beam-search). + end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). + cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the `is_impossible` label of the answers. 
+ + """ + + loss: Optional[torch.FloatTensor] = None + start_top_log_probs: Optional[torch.FloatTensor] = None + start_top_index: Optional[torch.LongTensor] = None + end_top_log_probs: Optional[torch.FloatTensor] = None + end_top_index: Optional[torch.LongTensor] = None + cls_logits: Optional[torch.FloatTensor] = None + + +class SQuADHead(nn.Module): + r""" + A SQuAD head inspired by XLNet. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` + to use. + """ + + def __init__(self, config): + super().__init__() + self.start_n_top = config.start_n_top + self.end_n_top = config.end_n_top + + self.start_logits = PoolerStartLogits(config) + self.end_logits = PoolerEndLogits(config) + self.answer_class = PoolerAnswerClass(config) + + @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) + def forward( + self, + hidden_states: torch.FloatTensor, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + cls_index: Optional[torch.LongTensor] = None, + is_impossible: Optional[torch.LongTensor] = None, + p_mask: Optional[torch.FloatTensor] = None, + return_dict: bool = False, + ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + Final hidden states of the model on the sequence tokens. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Positions of the first token for the labeled span. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Positions of the last token for the labeled span. + cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Position of the CLS token for each sentence in the batch. If `None`, takes the last token. + is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Whether the question has a possible answer in the paragraph or not. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + return_dict (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ + Returns: + """ + start_logits = self.start_logits(hidden_states, p_mask=p_mask) + + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, let's remove the dimension added by batch splitting + for x in (start_positions, end_positions, cls_index, is_impossible): + if x is not None and x.dim() > 1: + x.squeeze_(-1) + + # during training, compute the end logits based on the ground truth of the start position + end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) + + loss_fct = CrossEntropyLoss() + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if cls_index is not None and is_impossible is not None: + # Predict answerability from the representation of CLS and START + cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) + loss_fct_cls = nn.BCEWithLogitsLoss() + cls_loss = loss_fct_cls(cls_logits, is_impossible) + + # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss + total_loss += cls_loss * 0.5 + + return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) + + else: + # during inference, compute the end logits based on beam search + bsz, slen, hsz = hidden_states.size() + start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) + + start_top_log_probs, start_top_index = torch.topk( + start_log_probs, self.start_n_top, dim=-1 + ) # shape (bsz, start_n_top) + start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) + start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) + start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) + + hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( + start_states + ) # shape (bsz, slen, start_n_top, hsz) + p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None + end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) + end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) + + end_top_log_probs, end_top_index = torch.topk( + end_log_probs, self.end_n_top, dim=1 + ) # shape (bsz, end_n_top, start_n_top) + end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) + end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) + + start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) + cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) + + if not return_dict: + return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + else: + return SquadHeadOutput( + start_top_log_probs=start_top_log_probs, + start_top_index=start_top_index, + end_top_log_probs=end_top_log_probs, + end_top_index=end_top_index, + cls_logits=cls_logits, + )
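# --- Editorial sketch (not part of the original diff): exercising SQuADHead in its
# two modes. Assumes the upstream transformers layout at the time of this diff and a
# config carrying the attributes the head reads (hidden_size, layer_norm_eps,
# start_n_top, end_n_top); all sizes are hypothetical.
import torch
from transformers import PretrainedConfig
from transformers.modeling_utils import SQuADHead

config = PretrainedConfig(hidden_size=32, layer_norm_eps=1e-12, start_n_top=5, end_n_top=5)
head = SQuADHead(config)
hidden = torch.randn(2, 16, 32)  # (bsz, slen, hsz)

# Training: gold spans provided -> the averaged start/end cross-entropy loss.
out = head(hidden, start_positions=torch.tensor([3, 5]), end_positions=torch.tensor([4, 9]), return_dict=True)
print(out.loss)

# Inference: no labels -> beam-searched top-k start/end candidates plus cls_logits.
out = head(hidden, return_dict=True)
print(out.start_top_index.shape)  # torch.Size([2, 5])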
+ + +class SequenceSummary(nn.Module): + r""" + Compute a single vector summary of a sequence's hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model. Relevant arguments in the config class of the model are (refer to the actual + config class of your model for the default values it uses): + + - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: + + - `"last"` -- Take the last token hidden state (like XLNet) + - `"first"` -- Take the first token hidden state (like Bert) + - `"mean"` -- Take the mean of all tokens' hidden states + - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) + - `"attn"` -- Not implemented for now (would use multi-head attention) + + - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. + - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes + (otherwise to `config.hidden_size`). + - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, + another string or `None` will add no activation. + - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. + - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + + self.summary_type = getattr(config, "summary_type", "last") + if self.summary_type == "attn": + # We should use a standard multi-head attention module with absolute positional embedding for that. + # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 + # We can probably just use the multi-head attention module of PyTorch >=1.1.0 + raise NotImplementedError + + self.summary = Identity() + if hasattr(config, "summary_use_proj") and config.summary_use_proj: + if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: + num_classes = config.num_labels + else: + num_classes = config.hidden_size + self.summary = nn.Linear(config.hidden_size, num_classes) + + activation_string = getattr(config, "summary_activation", None) + self.activation: Callable = get_activation(activation_string) if activation_string else Identity() + + self.first_dropout = Identity() + if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: + self.first_dropout = nn.Dropout(config.summary_first_dropout) + + self.last_dropout = Identity() + if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: + self.last_dropout = nn.Dropout(config.summary_last_dropout) + + def forward( + self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None + ) -> torch.FloatTensor: + """ + Compute a single vector summary of a sequence's hidden states. + + Args: + hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): + The hidden states of the last layer. + cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): + Used if `summary_type == "cls_index"`. If `None`, the last token of the sequence is used as the classification token. + + Returns: + `torch.FloatTensor`: The summary of the sequence hidden states. + """ + if self.summary_type == "last": + output = hidden_states[:, -1] + elif self.summary_type == "first": + output = hidden_states[:, 0] + elif self.summary_type == "mean": + output = hidden_states.mean(dim=1) + elif self.summary_type == "cls_index": + if cls_index is None: + cls_index = torch.full_like( + hidden_states[..., :1, :], + hidden_states.shape[-2] - 1, + dtype=torch.long, + ) + else: + cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) + cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) + # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states + output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) + elif self.summary_type == "attn": + raise NotImplementedError + + output = self.first_dropout(output) + output = self.summary(output) + output = self.activation(output) + output = self.last_dropout(output) + + return output
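# --- Editorial sketch (not part of the original diff): SequenceSummary is driven
# entirely by config attributes; the values below are hypothetical and assume the
# upstream transformers layout at the time of this diff.
import torch
from transformers import PretrainedConfig
from transformers.modeling_utils import SequenceSummary

config = PretrainedConfig(
    hidden_size=8,
    summary_type="cls_index",
    summary_use_proj=True,
    summary_proj_to_labels=False,  # project to hidden_size, not num_labels
    summary_activation="tanh",
    summary_first_dropout=0.1,
    summary_last_dropout=0.0,  # 0.0 keeps the Identity() fallback
)
summary = SequenceSummary(config).eval()  # eval() disables the dropout

hidden_states = torch.randn(2, 10, 8)
cls_index = torch.tensor([9, 4])  # per-sample position of the classification token
print(summary(hidden_states, cls_index).shape)  # torch.Size([2, 8])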
+ + +def unwrap_model(model: nn.Module) -> nn.Module: + """ + Recursively unwraps a model from potential containers (as used in distributed training). + + Args: + model (`torch.nn.Module`): The model to unwrap. + """ + # since there could be multiple levels of wrapping, unwrap recursively + if hasattr(model, "module"): + return unwrap_model(model.module) + else: + return model + + +def expand_device_map(device_map, param_names, start_prefix): + """ + Expand a device map to return the correspondence from parameter name to device. + """ + new_device_map = {} + param_names = [p[len(start_prefix) :] for p in param_names if p.startswith(start_prefix)] + for module, device in device_map.items(): + new_device_map.update( + {p: device for p in param_names if p == module or p.startswith(f"{module}.") or module == ""} + ) + return new_device_map + + +def get_disk_only_shard_files(device_map, sharded_metadata, start_prefix): + """ + Returns the list of shard files containing only weights offloaded to disk. + """ + + weight_map = { + p[len(start_prefix) :]: v for p, v in sharded_metadata["weight_map"].items() if p.startswith(start_prefix) + } + files_content = collections.defaultdict(list) + for weight_name, filename in weight_map.items(): + while len(weight_name) > 0 and weight_name not in device_map: + weight_name = ".".join(weight_name.split(".")[:-1]) + files_content[filename].append(device_map[weight_name]) + + return [fname for fname, devices in files_content.items() if set(devices) == {"disk"}] diff --git a/modified/models/__init__.py b/modified/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..df20f9a47d006a77a03a306253bab8a2345f6740 --- /dev/null +++ b/modified/models/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import ( + + + phi, + +) diff --git a/modified/models/__pycache__/__init__.cpython-39.pyc b/modified/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e427db71ec2c07ef79728d25d8976e07ac9cf74 Binary files /dev/null and b/modified/models/__pycache__/__init__.cpython-39.pyc differ diff --git a/modified/models/auto/__init__.py b/modified/models/auto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..153f7f10def694285c89a78afce24523dc6afe36 --- /dev/null +++ b/modified/models/auto/__init__.py @@ -0,0 +1,397 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_torch_available, +) + + +_import_structure = { + "auto_factory": ["get_values"], + "configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"], + "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"], + "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"], + "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"], + "tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_auto"] = [ + "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", + "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING", + "MODEL_FOR_AUDIO_XVECTOR_MAPPING", + "MODEL_FOR_BACKBONE_MAPPING", + "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", + "MODEL_FOR_CAUSAL_LM_MAPPING", + "MODEL_FOR_CTC_MAPPING", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_DEPTH_ESTIMATION_MAPPING", + "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", + "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", + "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING", + "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", + "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING", + "MODEL_FOR_MASKED_LM_MAPPING", + "MODEL_FOR_MASK_GENERATION_MAPPING", + "MODEL_FOR_MULTIPLE_CHOICE_MAPPING", + "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", + "MODEL_FOR_OBJECT_DETECTION_MAPPING", + "MODEL_FOR_PRETRAINING_MAPPING", + "MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", + "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", + "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", + "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", + "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_TEXT_ENCODING_MAPPING", + "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING", + "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING", + "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING", + "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", + "MODEL_FOR_VISION_2_SEQ_MAPPING", + "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", + "MODEL_MAPPING", + "MODEL_WITH_LM_HEAD_MAPPING", + "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", 
+ "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", + "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", + "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", + "AutoModel", + "AutoBackbone", + "AutoModelForAudioClassification", + "AutoModelForAudioFrameClassification", + "AutoModelForAudioXVector", + "AutoModelForCausalLM", + "AutoModelForCTC", + "AutoModelForDepthEstimation", + "AutoModelForImageClassification", + "AutoModelForImageSegmentation", + "AutoModelForImageToImage", + "AutoModelForInstanceSegmentation", + "AutoModelForMaskGeneration", + "AutoModelForTextEncoding", + "AutoModelForMaskedImageModeling", + "AutoModelForMaskedLM", + "AutoModelForMultipleChoice", + "AutoModelForNextSentencePrediction", + "AutoModelForObjectDetection", + "AutoModelForPreTraining", + "AutoModelForQuestionAnswering", + "AutoModelForSemanticSegmentation", + "AutoModelForSeq2SeqLM", + "AutoModelForSequenceClassification", + "AutoModelForSpeechSeq2Seq", + "AutoModelForTableQuestionAnswering", + "AutoModelForTextToSpectrogram", + "AutoModelForTextToWaveform", + "AutoModelForTokenClassification", + "AutoModelForUniversalSegmentation", + "AutoModelForVideoClassification", + "AutoModelForVision2Seq", + "AutoModelForVisualQuestionAnswering", + "AutoModelForDocumentQuestionAnswering", + "AutoModelWithLMHead", + "AutoModelForZeroShotImageClassification", + "AutoModelForZeroShotObjectDetection", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_auto"] = [ + "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_CAUSAL_LM_MAPPING", + "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_MASK_GENERATION_MAPPING", + "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING", + "TF_MODEL_FOR_MASKED_LM_MAPPING", + "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", + "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", + "TF_MODEL_FOR_PRETRAINING_MAPPING", + "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", + "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", + "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", + "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_TEXT_ENCODING_MAPPING", + "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_VISION_2_SEQ_MAPPING", + "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", + "TF_MODEL_MAPPING", + "TF_MODEL_WITH_LM_HEAD_MAPPING", + "TFAutoModel", + "TFAutoModelForAudioClassification", + "TFAutoModelForCausalLM", + "TFAutoModelForImageClassification", + "TFAutoModelForMaskedImageModeling", + "TFAutoModelForMaskedLM", + "TFAutoModelForMaskGeneration", + "TFAutoModelForMultipleChoice", + "TFAutoModelForNextSentencePrediction", + "TFAutoModelForPreTraining", + "TFAutoModelForDocumentQuestionAnswering", + "TFAutoModelForQuestionAnswering", + "TFAutoModelForSemanticSegmentation", + "TFAutoModelForSeq2SeqLM", + "TFAutoModelForSequenceClassification", + "TFAutoModelForSpeechSeq2Seq", + "TFAutoModelForTableQuestionAnswering", + "TFAutoModelForTextEncoding", + "TFAutoModelForTokenClassification", + "TFAutoModelForVision2Seq", + "TFAutoModelForZeroShotImageClassification", + "TFAutoModelWithLMHead", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_auto"] = [ + "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", + 
"FLAX_MODEL_FOR_CAUSAL_LM_MAPPING", + "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_MASKED_LM_MAPPING", + "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", + "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", + "FLAX_MODEL_FOR_PRETRAINING_MAPPING", + "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", + "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", + "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING", + "FLAX_MODEL_MAPPING", + "FlaxAutoModel", + "FlaxAutoModelForCausalLM", + "FlaxAutoModelForImageClassification", + "FlaxAutoModelForMaskedLM", + "FlaxAutoModelForMultipleChoice", + "FlaxAutoModelForNextSentencePrediction", + "FlaxAutoModelForPreTraining", + "FlaxAutoModelForQuestionAnswering", + "FlaxAutoModelForSeq2SeqLM", + "FlaxAutoModelForSequenceClassification", + "FlaxAutoModelForSpeechSeq2Seq", + "FlaxAutoModelForTokenClassification", + "FlaxAutoModelForVision2Seq", + ] + + +if TYPE_CHECKING: + from .auto_factory import get_values + from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig + from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor + from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor + from .processing_auto import PROCESSOR_MAPPING, AutoProcessor + from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_auto import ( + MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, + MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING, + MODEL_FOR_AUDIO_XVECTOR_MAPPING, + MODEL_FOR_BACKBONE_MAPPING, + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_CTC_MAPPING, + MODEL_FOR_DEPTH_ESTIMATION_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, + MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, + MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, + MODEL_FOR_MASK_GENERATION_MAPPING, + MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, + MODEL_FOR_MASKED_LM_MAPPING, + MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + MODEL_FOR_OBJECT_DETECTION_MAPPING, + MODEL_FOR_PRETRAINING_MAPPING, + MODEL_FOR_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_TEXT_ENCODING_MAPPING, + MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, + MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, + MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, + MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, + MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, + MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, + MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, + MODEL_MAPPING, + MODEL_WITH_LM_HEAD_MAPPING, + AutoBackbone, + AutoModel, + AutoModelForAudioClassification, + AutoModelForAudioFrameClassification, + AutoModelForAudioXVector, + AutoModelForCausalLM, + AutoModelForCTC, + AutoModelForDepthEstimation, + AutoModelForDocumentQuestionAnswering, + AutoModelForImageClassification, + 
AutoModelForImageSegmentation, + AutoModelForImageToImage, + AutoModelForInstanceSegmentation, + AutoModelForMaskedImageModeling, + AutoModelForMaskedLM, + AutoModelForMaskGeneration, + AutoModelForMultipleChoice, + AutoModelForNextSentencePrediction, + AutoModelForObjectDetection, + AutoModelForPreTraining, + AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForSpeechSeq2Seq, + AutoModelForTableQuestionAnswering, + AutoModelForTextEncoding, + AutoModelForTextToSpectrogram, + AutoModelForTextToWaveform, + AutoModelForTokenClassification, + AutoModelForUniversalSegmentation, + AutoModelForVideoClassification, + AutoModelForVision2Seq, + AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotImageClassification, + AutoModelForZeroShotObjectDetection, + AutoModelWithLMHead, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_auto import ( + TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_MASK_GENERATION_MAPPING, + TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, + TF_MODEL_FOR_MASKED_LM_MAPPING, + TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + TF_MODEL_FOR_PRETRAINING_MAPPING, + TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_TEXT_ENCODING_MAPPING, + TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, + TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, + TF_MODEL_MAPPING, + TF_MODEL_WITH_LM_HEAD_MAPPING, + TFAutoModel, + TFAutoModelForAudioClassification, + TFAutoModelForCausalLM, + TFAutoModelForDocumentQuestionAnswering, + TFAutoModelForImageClassification, + TFAutoModelForMaskedImageModeling, + TFAutoModelForMaskedLM, + TFAutoModelForMaskGeneration, + TFAutoModelForMultipleChoice, + TFAutoModelForNextSentencePrediction, + TFAutoModelForPreTraining, + TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForSpeechSeq2Seq, + TFAutoModelForTableQuestionAnswering, + TFAutoModelForTextEncoding, + TFAutoModelForTokenClassification, + TFAutoModelForVision2Seq, + TFAutoModelForZeroShotImageClassification, + TFAutoModelWithLMHead, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_auto import ( + FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_MASKED_LM_MAPPING, + FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + FLAX_MODEL_FOR_PRETRAINING_MAPPING, + FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, + FLAX_MODEL_MAPPING, + FlaxAutoModel, + FlaxAutoModelForCausalLM, + FlaxAutoModelForImageClassification, + 
FlaxAutoModelForMaskedLM, + FlaxAutoModelForMultipleChoice, + FlaxAutoModelForNextSentencePrediction, + FlaxAutoModelForPreTraining, + FlaxAutoModelForQuestionAnswering, + FlaxAutoModelForSeq2SeqLM, + FlaxAutoModelForSequenceClassification, + FlaxAutoModelForSpeechSeq2Seq, + FlaxAutoModelForTokenClassification, + FlaxAutoModelForVision2Seq, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/modified/models/auto/__pycache__/__init__.cpython-39.pyc b/modified/models/auto/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bbb563b90cfcb6f0c2cd76a7dff0fbbb7884476 Binary files /dev/null and b/modified/models/auto/__pycache__/__init__.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/auto_factory.cpython-39.pyc b/modified/models/auto/__pycache__/auto_factory.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a39e3098428e61c0d6e21dc7c83e897d69ea2a12 Binary files /dev/null and b/modified/models/auto/__pycache__/auto_factory.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/configuration_auto.cpython-39.pyc b/modified/models/auto/__pycache__/configuration_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42990d62c15de9f28f22145270ce7fc9c38215eb Binary files /dev/null and b/modified/models/auto/__pycache__/configuration_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/feature_extraction_auto.cpython-39.pyc b/modified/models/auto/__pycache__/feature_extraction_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f586ba082ee5b654b8204c04db7713373c5af0d4 Binary files /dev/null and b/modified/models/auto/__pycache__/feature_extraction_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/image_processing_auto.cpython-39.pyc b/modified/models/auto/__pycache__/image_processing_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3737f0963307634004b0185eff413e3f3a775b8 Binary files /dev/null and b/modified/models/auto/__pycache__/image_processing_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/modeling_auto.cpython-39.pyc b/modified/models/auto/__pycache__/modeling_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfe6806da90568af801f030985bcbc1a8e97b8ad Binary files /dev/null and b/modified/models/auto/__pycache__/modeling_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/modeling_flax_auto.cpython-39.pyc b/modified/models/auto/__pycache__/modeling_flax_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..482c370dbd0ff8b6bd0eed6241759513d4f1b9cc Binary files /dev/null and b/modified/models/auto/__pycache__/modeling_flax_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/modeling_tf_auto.cpython-39.pyc b/modified/models/auto/__pycache__/modeling_tf_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2a6466ab695a81c22652738480548be096ced27 Binary files /dev/null and b/modified/models/auto/__pycache__/modeling_tf_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/processing_auto.cpython-39.pyc b/modified/models/auto/__pycache__/processing_auto.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d3f2f32c351cecc6bc090b53ae78121ec7117b7c Binary files /dev/null and b/modified/models/auto/__pycache__/processing_auto.cpython-39.pyc differ diff --git a/modified/models/auto/__pycache__/tokenization_auto.cpython-39.pyc b/modified/models/auto/__pycache__/tokenization_auto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34669e54148e7ee4657b9e5d16064e68d804e85b Binary files /dev/null and b/modified/models/auto/__pycache__/tokenization_auto.cpython-39.pyc differ diff --git a/modified/models/auto/auto_factory.py b/modified/models/auto/auto_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..92dbb006f6d5c5ac08108f6b04a80a88068b0b69 --- /dev/null +++ b/modified/models/auto/auto_factory.py @@ -0,0 +1,812 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Factory function to build auto-model classes.""" +import copy +import importlib +import json +import os +import warnings +from collections import OrderedDict + +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...utils import ( + CONFIG_NAME, + cached_file, + copy_func, + extract_commit_hash, + find_adapter_config_file, + is_peft_available, + logging, + requires_backends, +) +from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings + + +logger = logging.get_logger(__name__) + + +CLASS_DOCSTRING = """ + This is a generic model class that will be instantiated as one of the model classes of the library when created + with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class + method. + + This class cannot be instantiated directly using `__init__()` (throws an error). +""" + +FROM_CONFIG_DOCSTRING = """ + Instantiates one of the model classes of the library from a configuration. + + Note: + Loading a model from its configuration file does **not** load the model weights. It only affects the + model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights. + + Args: + config ([`PretrainedConfig`]): + The model class to instantiate is selected based on the configuration class: + + List options + + Examples: + + ```python + >>> from transformers import AutoConfig, BaseAutoModelClass + + >>> # Download configuration from huggingface.co and cache. + >>> config = AutoConfig.from_pretrained("checkpoint_placeholder") + >>> model = BaseAutoModelClass.from_config(config) + ``` +""" + +FROM_PRETRAINED_TORCH_DOCSTRING = """ + Instantiate one of the model classes of the library from a pretrained model. 
+ + The model class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by + falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are + deactivated). To train the model, you should first set it back in training mode with `model.train()` + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In + this case, `from_tf` should be set to `True` and a configuration object should be provided as + `config` argument. This loading path is slower than converting the TensorFlow checkpoint to a + PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + model_args (additional positional arguments, *optional*): + Will be passed along to the underlying model `__init__()` method. + config ([`PretrainedConfig`], *optional*): + Configuration for the model to use instead of an automatically loaded configuration. Configuration can + be automatically loaded when: + + - The model is a model provided by the library (loaded with the *model id* string of a pretrained + model). + - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the + save directory. + - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a + configuration JSON file named *config.json* is found in the directory. + state_dict (*Dict[str, torch.Tensor]*, *optional*): + A state dictionary to use instead of a state dictionary loaded from saved weights file. + + This option can be used if you want to create a model from a pretrained configuration but load your own + weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and + [`~PreTrainedModel.from_pretrained`] is not a simpler option. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + from_tf (`bool`, *optional*, defaults to `False`): + Load the model weights from a TensorFlow checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
+ output_loading_info (`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (e.g., not try downloading the model). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + code_revision (`str`, *optional*, defaults to `"main"`): + The specific revision to use for the code on the Hub, if the code lives in a different repository than + the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based + system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier + allowed by git. + kwargs (additional keyword arguments, *optional*): + Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., + `output_attentions=True`). Behaves differently depending on whether a `config` is provided or + automatically loaded: + + - If a configuration is provided with `config`, `**kwargs` will be directly passed to the + underlying model's `__init__` method (we assume all relevant updates to the configuration have + already been done) + - If a configuration is not provided, `kwargs` will be first passed to the configuration class + initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that + corresponds to a configuration attribute will be used to override said attribute with the + supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute + will be passed to the underlying model's `__init__` function. + + Examples: + + ```python + >>> from transformers import AutoConfig, BaseAutoModelClass + + >>> # Download model and configuration from huggingface.co and cache. + >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder") + + >>> # Update configuration during loading + >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True) + >>> model.config.output_attentions + True + + >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) + >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json") + >>> model = BaseAutoModelClass.from_pretrained( + ... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config + ... ) + ``` +"""
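# --- Editorial sketch (not part of the original diff): the kwargs split described
# in the docstring above. With no explicit `config`, extra kwargs that match config
# attributes update the config and the rest reach the model's __init__;
# "bert-base-cased" is only a familiar example checkpoint.
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-cased", output_attentions=True)
assert model.config.output_attentions is True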
+ +FROM_PRETRAINED_TF_DOCSTRING = """ + Instantiate one of the model classes of the library from a pretrained model. + + The model class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by + falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this + case, `from_pt` should be set to `True` and a configuration object should be provided as `config` + argument. This loading path is slower than converting the PyTorch model to a TensorFlow model + using the provided conversion scripts and loading the TensorFlow model afterwards. + model_args (additional positional arguments, *optional*): + Will be passed along to the underlying model `__init__()` method. + config ([`PretrainedConfig`], *optional*): + Configuration for the model to use instead of an automatically loaded configuration. Configuration can + be automatically loaded when: + + - The model is a model provided by the library (loaded with the *model id* string of a pretrained + model). + - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the + save directory. + - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a + configuration JSON file named *config.json* is found in the directory. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + from_pt (`bool`, *optional*, defaults to `False`): + Load the model weights from a PyTorch checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info (`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (e.g., not try downloading the model). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. 
+ trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + code_revision (`str`, *optional*, defaults to `"main"`): + The specific revision to use for the code on the Hub, if the code lives in a different repository than + the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based + system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier + allowed by git. + kwargs (additional keyword arguments, *optional*): + Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., + `output_attentions=True`). Behaves differently depending on whether a `config` is provided or + automatically loaded: + + - If a configuration is provided with `config`, `**kwargs` will be directly passed to the + underlying model's `__init__` method (we assume all relevant updates to the configuration have + already been done) + - If a configuration is not provided, `kwargs` will be first passed to the configuration class + initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that + corresponds to a configuration attribute will be used to override said attribute with the + supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute + will be passed to the underlying model's `__init__` function. + + Examples: + + ```python + >>> from transformers import AutoConfig, BaseAutoModelClass + + >>> # Download model and configuration from huggingface.co and cache. + >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder") + + >>> # Update configuration during loading + >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True) + >>> model.config.output_attentions + True + + >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) + >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json") + >>> model = BaseAutoModelClass.from_pretrained( + ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config + ... ) + ``` +""" + +FROM_PRETRAINED_FLAX_DOCSTRING = """ + Instantiate one of the model classes of the library from a pretrained model. + + The model class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by + falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this + case, `from_pt` should be set to `True` and a configuration object should be provided as `config` + argument. 
This loading path is slower than converting the PyTorch model to a Flax model + using the provided conversion scripts and loading the Flax model afterwards. + model_args (additional positional arguments, *optional*): + Will be passed along to the underlying model `__init__()` method. + config ([`PretrainedConfig`], *optional*): + Configuration for the model to use instead of an automatically loaded configuration. Configuration can + be automatically loaded when: + + - The model is a model provided by the library (loaded with the *model id* string of a pretrained + model). + - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the + save directory. + - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a + configuration JSON file named *config.json* is found in the directory. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + from_pt (`bool`, *optional*, defaults to `False`): + Load the model weights from a PyTorch checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info (`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (e.g., not try downloading the model). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + code_revision (`str`, *optional*, defaults to `"main"`): + The specific revision to use for the code on the Hub, if the code lives in a different repository than + the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based + system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier + allowed by git. + kwargs (additional keyword arguments, *optional*): + Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., + `output_attentions=True`). 
Behaves differently depending on whether a `config` is provided or + automatically loaded: + + - If a configuration is provided with `config`, `**kwargs` will be directly passed to the + underlying model's `__init__` method (we assume all relevant updates to the configuration have + already been done) + - If a configuration is not provided, `kwargs` will be first passed to the configuration class + initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that + corresponds to a configuration attribute will be used to override said attribute with the + supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute + will be passed to the underlying model's `__init__` function. + + Examples: + + ```python + >>> from transformers import AutoConfig, BaseAutoModelClass + + >>> # Download model and configuration from huggingface.co and cache. + >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder") + + >>> # Update configuration during loading + >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True) + >>> model.config.output_attentions + True + + >>> # Loading from a PyTorch checkpoint file instead of a Flax model (slower) + >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json") + >>> model = BaseAutoModelClass.from_pretrained( + ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config + ... ) + ``` +""" + + +def _get_model_class(config, model_mapping): + supported_models = model_mapping[type(config)] + if not isinstance(supported_models, (list, tuple)): + return supported_models + + name_to_model = {model.__name__: model for model in supported_models} + architectures = getattr(config, "architectures", []) + for arch in architectures: + if arch in name_to_model: + return name_to_model[arch] + elif f"TF{arch}" in name_to_model: + return name_to_model[f"TF{arch}"] + elif f"Flax{arch}" in name_to_model: + return name_to_model[f"Flax{arch}"] + + # If no architecture is set in the config, or none matches the supported models, the first element of the + # tuple is the default. + return supported_models[0]
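# --- Editorial sketch (not part of the original diff): how the architecture-based
# selection in _get_model_class resolves a tuple of candidates. Dummy classes stand
# in for real model classes; the helper mirrors the loop above.
class BertModel: ...
class TFBertModel: ...

class DummyConfig:
    architectures = ["BertModel"]

def pick(config, supported_models):
    name_to_model = {m.__name__: m for m in supported_models}
    for arch in getattr(config, "architectures", []):
        for name in (arch, f"TF{arch}", f"Flax{arch}"):
            if name in name_to_model:
                return name_to_model[name]
    return supported_models[0]  # fall back to the first (default) entry

print(pick(DummyConfig(), (TFBertModel, BertModel)))  # -> BertModel, not the first entry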
+ + +class _BaseAutoModelClass: + # Base class for auto models. + _model_mapping = None + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_config(config)` methods." + ) + + @classmethod + def from_config(cls, config, **kwargs): + trust_remote_code = kwargs.pop("trust_remote_code", None) + has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map + has_local_code = type(config) in cls._model_mapping.keys() + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, config._name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + class_ref = config.auto_map[cls.__name__] + if "--" in class_ref: + repo_id, class_ref = class_ref.split("--") + else: + repo_id = config.name_or_path + model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs) + if os.path.isdir(config._name_or_path): + model_class.register_for_auto_class(cls.__name__) + else: + cls.register(config.__class__, model_class, exist_ok=True) + _ = kwargs.pop("code_revision", None) + return model_class._from_config(config, **kwargs) + elif type(config) in cls._model_mapping.keys(): + model_class = _get_model_class(config, cls._model_mapping) + return model_class._from_config(config, **kwargs) + + raise ValueError( + f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" + f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}." + ) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + hub_kwargs_names = [ + "cache_dir", + "force_download", + "local_files_only", + "proxies", + "resume_download", + "revision", + "subfolder", + "use_auth_token", + "token", + ] + hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs} + code_revision = kwargs.pop("code_revision", None) + commit_hash = kwargs.pop("_commit_hash", None) + adapter_kwargs = kwargs.pop("adapter_kwargs", None) + + token = hub_kwargs.pop("token", None) + use_auth_token = hub_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ ) + token = use_auth_token + + if token is not None: + hub_kwargs["token"] = token + + if commit_hash is None: + if not isinstance(config, PretrainedConfig): + # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible + resolved_config_file = cached_file( + pretrained_model_name_or_path, + CONFIG_NAME, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + **hub_kwargs, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + else: + commit_hash = getattr(config, "_commit_hash", None) + + if is_peft_available(): + if adapter_kwargs is None: + adapter_kwargs = {} + if token is not None: + adapter_kwargs["token"] = token + + maybe_adapter_path = find_adapter_config_file( + pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs + ) + + if maybe_adapter_path is not None: + with open(maybe_adapter_path, "r", encoding="utf-8") as f: + adapter_config = json.load(f) + + adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path + pretrained_model_name_or_path = adapter_config["base_model_name_or_path"] + + if not isinstance(config, PretrainedConfig): + kwargs_orig = copy.deepcopy(kwargs) + # ensure not to pollute the config object with torch_dtype="auto" - since it's + # meaningless in the context of the config object - torch.dtype values are acceptable + if kwargs.get("torch_dtype", None) == "auto": + _ = kwargs.pop("torch_dtype") + # to not overwrite the quantization_config if config has a quantization_config + if kwargs.get("quantization_config", None) is not None: + _ = kwargs.pop("quantization_config") + + config, kwargs = AutoConfig.from_pretrained( + pretrained_model_name_or_path, + return_unused_kwargs=True, + trust_remote_code=trust_remote_code, + code_revision=code_revision, + _commit_hash=commit_hash, + **hub_kwargs, + **kwargs, + ) + + # if torch_dtype=auto was passed here, ensure to pass it on + if kwargs_orig.get("torch_dtype", None) == "auto": + kwargs["torch_dtype"] = "auto" + if kwargs_orig.get("quantization_config", None) is not None: + kwargs["quantization_config"] = kwargs_orig["quantization_config"] + + has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map + has_local_code = type(config) in cls._model_mapping.keys() + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + # Set the adapter kwargs + kwargs["adapter_kwargs"] = adapter_kwargs + + if has_remote_code and trust_remote_code: + class_ref = config.auto_map[cls.__name__] + model_class = get_class_from_dynamic_module( + class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs + ) + _ = hub_kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + model_class.register_for_auto_class(cls.__name__) + else: + cls.register(config.__class__, model_class, exist_ok=True) + return model_class.from_pretrained( + pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs + ) + elif type(config) in cls._model_mapping.keys(): + model_class = _get_model_class(config, cls._model_mapping) + return model_class.from_pretrained( + pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs + ) + raise ValueError( + f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" + f"Model type should be one of {', '.join(c.__name__ for c in 
cls._model_mapping.keys())}." + ) + + @classmethod + def register(cls, config_class, model_class, exist_ok=False): + """ + Register a new model for this class. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + model_class ([`PreTrainedModel`]): + The model to register. + """ + if hasattr(model_class, "config_class") and model_class.config_class != config_class: + raise ValueError( + "The model class you are passing has a `config_class` attribute that is not consistent with the " + f"config class you passed (model has {model_class.config_class} and you passed {config_class}). Fix " + "one of those so they match!" + ) + cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok) + + +class _BaseAutoBackboneClass(_BaseAutoModelClass): + # Base class for auto backbone models. + _model_mapping = None + + @classmethod + def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + requires_backends(cls, ["vision", "timm"]) + from ...models.timm_backbone import TimmBackboneConfig + + config = kwargs.pop("config", TimmBackboneConfig()) + + use_timm = kwargs.pop("use_timm_backbone", True) + if not use_timm: + raise ValueError("`use_timm_backbone` must be `True` for timm backbones") + + if kwargs.get("out_features", None) is not None: + raise ValueError("Cannot specify `out_features` for timm backbones") + + if kwargs.get("output_loading_info", False): + raise ValueError("Cannot specify `output_loading_info=True` when loading from timm") + + num_channels = kwargs.pop("num_channels", config.num_channels) + features_only = kwargs.pop("features_only", config.features_only) + use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone) + out_indices = kwargs.pop("out_indices", config.out_indices) + config = TimmBackboneConfig( + backbone=pretrained_model_name_or_path, + num_channels=num_channels, + features_only=features_only, + use_pretrained_backbone=use_pretrained_backbone, + out_indices=out_indices, + ) + return super().from_config(config, **kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + if kwargs.get("use_timm_backbone", False): + return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + + +def insert_head_doc(docstring, head_doc=""): + if len(head_doc) > 0: + return docstring.replace( + "one of the model classes of the library ", + f"one of the model classes of the library (with a {head_doc} head) ", + ) + return docstring.replace( + "one of the model classes of the library ", "one of the base model classes of the library " + )
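# --- Editorial sketch (not part of the original diff): what insert_head_doc (defined
# just above) does to the shared docstring phrase.
doc = "Instantiate one of the model classes of the library from a pretrained model."
print(insert_head_doc(doc, head_doc="causal language modeling"))
# Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model.
print(insert_head_doc(doc))
# Instantiate one of the base model classes of the library from a pretrained model.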
+ from_config = copy_func(_BaseAutoModelClass.from_config) + from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc) + from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name) + from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example) + from_config.__doc__ = from_config_docstring + from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config) + cls.from_config = classmethod(from_config) + + if name.startswith("TF"): + from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING + elif name.startswith("Flax"): + from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING + else: + from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING + from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained) + from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc) + from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name) + from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example) + shortcut = checkpoint_for_example.split("/")[-1].split("-")[0] + from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut) + from_pretrained.__doc__ = from_pretrained_docstring + from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained) + cls.from_pretrained = classmethod(from_pretrained) + return cls + + +def get_values(model_mapping): + result = [] + for model in model_mapping.values(): + if isinstance(model, (list, tuple)): + result += list(model) + else: + result.append(model) + + return result + + +def getattribute_from_module(module, attr): + if attr is None: + return None + if isinstance(attr, tuple): + return tuple(getattribute_from_module(module, a) for a in attr) + if hasattr(module, attr): + return getattr(module, attr) + # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the + # object at the top level. + transformers_module = importlib.import_module("transformers") + + if module != transformers_module: + try: + return getattribute_from_module(transformers_module, attr) + except ValueError: + raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!") + else: + raise ValueError(f"Could not find {attr} in {transformers_module}!") + + +class _LazyAutoMapping(OrderedDict): + """ + " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed. 
+ + Args: + - config_mapping: The map model type to config class + - model_mapping: The map model type to model (or tokenizer) class + """ + + def __init__(self, config_mapping, model_mapping): + self._config_mapping = config_mapping + self._reverse_config_mapping = {v: k for k, v in config_mapping.items()} + self._model_mapping = model_mapping + self._model_mapping._model_mapping = self + self._extra_content = {} + self._modules = {} + + def __len__(self): + common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys()) + return len(common_keys) + len(self._extra_content) + + def __getitem__(self, key): + if key in self._extra_content: + return self._extra_content[key] + model_type = self._reverse_config_mapping[key.__name__] + if model_type in self._model_mapping: + model_name = self._model_mapping[model_type] + return self._load_attr_from_module(model_type, model_name) + + # Maybe there was several model types associated with this config. + model_types = [k for k, v in self._config_mapping.items() if v == key.__name__] + for mtype in model_types: + if mtype in self._model_mapping: + model_name = self._model_mapping[mtype] + return self._load_attr_from_module(mtype, model_name) + raise KeyError(key) + + def _load_attr_from_module(self, model_type, attr): + module_name = model_type_to_module_name(model_type) + if module_name not in self._modules: + self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models") + return getattribute_from_module(self._modules[module_name], attr) + + def keys(self): + mapping_keys = [ + self._load_attr_from_module(key, name) + for key, name in self._config_mapping.items() + if key in self._model_mapping.keys() + ] + return mapping_keys + list(self._extra_content.keys()) + + def get(self, key, default): + try: + return self.__getitem__(key) + except KeyError: + return default + + def __bool__(self): + return bool(self.keys()) + + def values(self): + mapping_values = [ + self._load_attr_from_module(key, name) + for key, name in self._model_mapping.items() + if key in self._config_mapping.keys() + ] + return mapping_values + list(self._extra_content.values()) + + def items(self): + mapping_items = [ + ( + self._load_attr_from_module(key, self._config_mapping[key]), + self._load_attr_from_module(key, self._model_mapping[key]), + ) + for key in self._model_mapping.keys() + if key in self._config_mapping.keys() + ] + return mapping_items + list(self._extra_content.items()) + + def __iter__(self): + return iter(self.keys()) + + def __contains__(self, item): + if item in self._extra_content: + return True + if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping: + return False + model_type = self._reverse_config_mapping[item.__name__] + return model_type in self._model_mapping + + def register(self, key, value, exist_ok=False): + """ + Register a new model in this mapping. 
+ """ + if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping: + model_type = self._reverse_config_mapping[key.__name__] + if model_type in self._model_mapping.keys() and not exist_ok: + raise ValueError(f"'{key}' is already used by a Transformers model.") + + self._extra_content[key] = value diff --git a/modified/models/auto/configuration_auto.py b/modified/models/auto/configuration_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..b91226ac87789712b9b13bc36af98401634e1f45 --- /dev/null +++ b/modified/models/auto/configuration_auto.py @@ -0,0 +1,1138 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Auto Config class.""" +import importlib +import os +import re +import warnings +from collections import OrderedDict +from typing import List, Union + +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...utils import CONFIG_NAME, logging + + +logger = logging.get_logger(__name__) + +CONFIG_MAPPING_NAMES = OrderedDict( + [ + # Add configs here + ("albert", "AlbertConfig"), + ("align", "AlignConfig"), + ("altclip", "AltCLIPConfig"), + ("audio-spectrogram-transformer", "ASTConfig"), + ("autoformer", "AutoformerConfig"), + ("bark", "BarkConfig"), + ("bart", "BartConfig"), + ("beit", "BeitConfig"), + ("bert", "BertConfig"), + ("bert-generation", "BertGenerationConfig"), + ("big_bird", "BigBirdConfig"), + ("bigbird_pegasus", "BigBirdPegasusConfig"), + ("biogpt", "BioGptConfig"), + ("bit", "BitConfig"), + ("blenderbot", "BlenderbotConfig"), + ("blenderbot-small", "BlenderbotSmallConfig"), + ("blip", "BlipConfig"), + ("blip-2", "Blip2Config"), + ("bloom", "BloomConfig"), + ("bridgetower", "BridgeTowerConfig"), + ("bros", "BrosConfig"), + ("camembert", "CamembertConfig"), + ("canine", "CanineConfig"), + ("chinese_clip", "ChineseCLIPConfig"), + ("clap", "ClapConfig"), + ("clip", "CLIPConfig"), + ("clip_vision_model", "CLIPVisionConfig"), + ("clipseg", "CLIPSegConfig"), + ("clvp", "ClvpConfig"), + ("code_llama", "LlamaConfig"), + ("codegen", "CodeGenConfig"), + ("conditional_detr", "ConditionalDetrConfig"), + ("convbert", "ConvBertConfig"), + ("convnext", "ConvNextConfig"), + ("convnextv2", "ConvNextV2Config"), + ("cpmant", "CpmAntConfig"), + ("ctrl", "CTRLConfig"), + ("cvt", "CvtConfig"), + ("data2vec-audio", "Data2VecAudioConfig"), + ("data2vec-text", "Data2VecTextConfig"), + ("data2vec-vision", "Data2VecVisionConfig"), + ("deberta", "DebertaConfig"), + ("deberta-v2", "DebertaV2Config"), + ("decision_transformer", "DecisionTransformerConfig"), + ("deformable_detr", "DeformableDetrConfig"), + ("deit", "DeiTConfig"), + ("deta", "DetaConfig"), + ("detr", "DetrConfig"), + ("dinat", "DinatConfig"), + ("dinov2", "Dinov2Config"), + ("distilbert", "DistilBertConfig"), + ("donut-swin", "DonutSwinConfig"), + ("dpr", "DPRConfig"), + ("dpt", "DPTConfig"), + ("efficientformer", "EfficientFormerConfig"), + 
("efficientnet", "EfficientNetConfig"), + ("electra", "ElectraConfig"), + ("encodec", "EncodecConfig"), + ("encoder-decoder", "EncoderDecoderConfig"), + ("ernie", "ErnieConfig"), + ("ernie_m", "ErnieMConfig"), + ("esm", "EsmConfig"), + ("falcon", "FalconConfig"), + ("flaubert", "FlaubertConfig"), + ("flava", "FlavaConfig"), + ("fnet", "FNetConfig"), + ("focalnet", "FocalNetConfig"), + ("fsmt", "FSMTConfig"), + ("funnel", "FunnelConfig"), + ("fuyu", "FuyuConfig"), + ("git", "GitConfig"), + ("glpn", "GLPNConfig"), + ("gpt-sw3", "GPT2Config"), + ("gpt2", "GPT2Config"), + ("gpt_bigcode", "GPTBigCodeConfig"), + ("gpt_neo", "GPTNeoConfig"), + ("gpt_neox", "GPTNeoXConfig"), + ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"), + ("gptj", "GPTJConfig"), + ("gptsan-japanese", "GPTSanJapaneseConfig"), + ("graphormer", "GraphormerConfig"), + ("groupvit", "GroupViTConfig"), + ("hubert", "HubertConfig"), + ("ibert", "IBertConfig"), + ("idefics", "IdeficsConfig"), + ("imagegpt", "ImageGPTConfig"), + ("informer", "InformerConfig"), + ("instructblip", "InstructBlipConfig"), + ("jukebox", "JukeboxConfig"), + ("kosmos-2", "Kosmos2Config"), + ("layoutlm", "LayoutLMConfig"), + ("layoutlmv2", "LayoutLMv2Config"), + ("layoutlmv3", "LayoutLMv3Config"), + ("led", "LEDConfig"), + ("levit", "LevitConfig"), + ("lilt", "LiltConfig"), + ("llama", "LlamaConfig"), + ("llava", "LlavaConfig"), + ("longformer", "LongformerConfig"), + ("longt5", "LongT5Config"), + ("luke", "LukeConfig"), + ("lxmert", "LxmertConfig"), + ("m2m_100", "M2M100Config"), + ("marian", "MarianConfig"), + ("markuplm", "MarkupLMConfig"), + ("mask2former", "Mask2FormerConfig"), + ("maskformer", "MaskFormerConfig"), + ("maskformer-swin", "MaskFormerSwinConfig"), + ("mbart", "MBartConfig"), + ("mctct", "MCTCTConfig"), + ("mega", "MegaConfig"), + ("megatron-bert", "MegatronBertConfig"), + ("mgp-str", "MgpstrConfig"), + ("mistral", "MistralConfig"), + ("mixtral", "MixtralConfig"), + ("mobilebert", "MobileBertConfig"), + ("mobilenet_v1", "MobileNetV1Config"), + ("mobilenet_v2", "MobileNetV2Config"), + ("mobilevit", "MobileViTConfig"), + ("mobilevitv2", "MobileViTV2Config"), + ("mpnet", "MPNetConfig"), + ("mpt", "MptConfig"), + ("mra", "MraConfig"), + ("mt5", "MT5Config"), + ("musicgen", "MusicgenConfig"), + ("mvp", "MvpConfig"), + ("nat", "NatConfig"), + ("nezha", "NezhaConfig"), + ("nllb-moe", "NllbMoeConfig"), + ("nougat", "VisionEncoderDecoderConfig"), + ("nystromformer", "NystromformerConfig"), + ("oneformer", "OneFormerConfig"), + ("open-llama", "OpenLlamaConfig"), + ("openai-gpt", "OpenAIGPTConfig"), + ("opt", "OPTConfig"), + ("owlv2", "Owlv2Config"), + ("owlvit", "OwlViTConfig"), + ("patchtsmixer", "PatchTSMixerConfig"), + ("patchtst", "PatchTSTConfig"), + ("pegasus", "PegasusConfig"), + ("pegasus_x", "PegasusXConfig"), + ("perceiver", "PerceiverConfig"), + ("persimmon", "PersimmonConfig"), + ("phi", "PhiConfig"), + ("pix2struct", "Pix2StructConfig"), + ("plbart", "PLBartConfig"), + ("poolformer", "PoolFormerConfig"), + ("pop2piano", "Pop2PianoConfig"), + ("prophetnet", "ProphetNetConfig"), + ("pvt", "PvtConfig"), + ("qdqbert", "QDQBertConfig"), + ("rag", "RagConfig"), + ("realm", "RealmConfig"), + ("reformer", "ReformerConfig"), + ("regnet", "RegNetConfig"), + ("rembert", "RemBertConfig"), + ("resnet", "ResNetConfig"), + ("retribert", "RetriBertConfig"), + ("roberta", "RobertaConfig"), + ("roberta-prelayernorm", "RobertaPreLayerNormConfig"), + ("roc_bert", "RoCBertConfig"), + ("roformer", "RoFormerConfig"), + ("rwkv", "RwkvConfig"), + ("sam", 
"SamConfig"), + ("seamless_m4t", "SeamlessM4TConfig"), + ("seamless_m4t_v2", "SeamlessM4Tv2Config"), + ("segformer", "SegformerConfig"), + ("sew", "SEWConfig"), + ("sew-d", "SEWDConfig"), + ("speech-encoder-decoder", "SpeechEncoderDecoderConfig"), + ("speech_to_text", "Speech2TextConfig"), + ("speech_to_text_2", "Speech2Text2Config"), + ("speecht5", "SpeechT5Config"), + ("splinter", "SplinterConfig"), + ("squeezebert", "SqueezeBertConfig"), + ("swiftformer", "SwiftFormerConfig"), + ("swin", "SwinConfig"), + ("swin2sr", "Swin2SRConfig"), + ("swinv2", "Swinv2Config"), + ("switch_transformers", "SwitchTransformersConfig"), + ("t5", "T5Config"), + ("table-transformer", "TableTransformerConfig"), + ("tapas", "TapasConfig"), + ("time_series_transformer", "TimeSeriesTransformerConfig"), + ("timesformer", "TimesformerConfig"), + ("timm_backbone", "TimmBackboneConfig"), + ("trajectory_transformer", "TrajectoryTransformerConfig"), + ("transfo-xl", "TransfoXLConfig"), + ("trocr", "TrOCRConfig"), + ("tvlt", "TvltConfig"), + ("tvp", "TvpConfig"), + ("umt5", "UMT5Config"), + ("unispeech", "UniSpeechConfig"), + ("unispeech-sat", "UniSpeechSatConfig"), + ("univnet", "UnivNetConfig"), + ("upernet", "UperNetConfig"), + ("van", "VanConfig"), + ("videomae", "VideoMAEConfig"), + ("vilt", "ViltConfig"), + ("vipllava", "VipLlavaConfig"), + ("vision-encoder-decoder", "VisionEncoderDecoderConfig"), + ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"), + ("visual_bert", "VisualBertConfig"), + ("vit", "ViTConfig"), + ("vit_hybrid", "ViTHybridConfig"), + ("vit_mae", "ViTMAEConfig"), + ("vit_msn", "ViTMSNConfig"), + ("vitdet", "VitDetConfig"), + ("vitmatte", "VitMatteConfig"), + ("vits", "VitsConfig"), + ("vivit", "VivitConfig"), + ("wav2vec2", "Wav2Vec2Config"), + ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"), + ("wavlm", "WavLMConfig"), + ("whisper", "WhisperConfig"), + ("xclip", "XCLIPConfig"), + ("xglm", "XGLMConfig"), + ("xlm", "XLMConfig"), + ("xlm-prophetnet", "XLMProphetNetConfig"), + ("xlm-roberta", "XLMRobertaConfig"), + ("xlm-roberta-xl", "XLMRobertaXLConfig"), + ("xlnet", "XLNetConfig"), + ("xmod", "XmodConfig"), + ("yolos", "YolosConfig"), + ("yoso", "YosoConfig"), + ] +) + +CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict( + [ + # Add archive maps here) + ("albert", "ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("align", "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("altclip", "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("audio-spectrogram-transformer", "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("autoformer", "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bark", "BARK_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bart", "BART_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("beit", "BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bert", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("big_bird", "BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bigbird_pegasus", "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("biogpt", "BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bit", "BIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("blenderbot", "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("blenderbot-small", "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("blip", "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("blip-2", "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bloom", "BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bridgetower", "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("bros", "BROS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("camembert", "CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("canine", 
"CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("chinese_clip", "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("clap", "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST"), + ("clip", "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("clipseg", "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("clvp", "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("codegen", "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("conditional_detr", "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("convbert", "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("convnext", "CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("convnextv2", "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("cpmant", "CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("ctrl", "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("cvt", "CVT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("data2vec-audio", "DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("data2vec-text", "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("data2vec-vision", "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("deberta", "DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("deberta-v2", "DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("deformable_detr", "DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("deit", "DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("deta", "DETA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("detr", "DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("dinat", "DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("dinov2", "DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("distilbert", "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("donut-swin", "DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("dpr", "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("efficientformer", "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("efficientnet", "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("encodec", "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("ernie_m", "ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("esm", "ESM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("falcon", "FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("flaubert", "FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("flava", "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("fnet", "FNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("focalnet", "FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("fsmt", "FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("funnel", "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("fuyu", "FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("git", "GIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("glpn", "GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gpt2", "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gpt_bigcode", "GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gpt_neo", "GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gpt_neox", "GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gpt_neox_japanese", "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gptj", "GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gptsan-japanese", "GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("graphormer", "GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("groupvit", "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("hubert", "HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("ibert", "IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("idefics", "IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("imagegpt", "IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("informer", "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("instructblip", "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("jukebox", "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("kosmos-2", 
"KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("layoutlm", "LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("layoutlmv2", "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("layoutlmv3", "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("led", "LED_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("levit", "LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("lilt", "LILT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("llama", "LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("llava", "LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("longformer", "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("longt5", "LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("luke", "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("lxmert", "LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("m2m_100", "M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("markuplm", "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mask2former", "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("maskformer", "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mbart", "MBART_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mctct", "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mega", "MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("megatron-bert", "MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mgp-str", "MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mistral", "MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mixtral", "MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mobilenet_v1", "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mobilenet_v2", "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mobilevit", "MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mobilevitv2", "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mpnet", "MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mpt", "MPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mra", "MRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("musicgen", "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("mvp", "MVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("nat", "NAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("nezha", "NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("nllb-moe", "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("nystromformer", "NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("oneformer", "ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("open-llama", "OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("openai-gpt", "OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("opt", "OPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("owlv2", "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("owlvit", "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("patchtsmixer", "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("patchtst", "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("pegasus", "PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("pegasus_x", "PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("perceiver", "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("persimmon", "PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("phi", "PHI_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("pix2struct", "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("plbart", "PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("poolformer", "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("pop2piano", "POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("prophetnet", "PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("pvt", "PVT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("qdqbert", "QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("realm", "REALM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("regnet", "REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("rembert", "REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("resnet", "RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("retribert", "RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("roberta", "ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + 
("roberta-prelayernorm", "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("roc_bert", "ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("roformer", "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("rwkv", "RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("sam", "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("seamless_m4t", "SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("seamless_m4t_v2", "SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("segformer", "SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("sew", "SEW_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("sew-d", "SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("speech_to_text", "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("speech_to_text_2", "SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("speecht5", "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("splinter", "SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("squeezebert", "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("swiftformer", "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("swin", "SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("swin2sr", "SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("swinv2", "SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("switch_transformers", "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("t5", "T5_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("table-transformer", "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("tapas", "TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("time_series_transformer", "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("timesformer", "TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("transfo-xl", "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("tvlt", "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("tvp", "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("univnet", "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("van", "VAN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("videomae", "VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vilt", "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vipllava", "VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("visual_bert", "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vit", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vit_hybrid", "VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vit_mae", "VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vit_msn", "VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vitdet", "VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vitmatte", "VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vits", "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vivit", "VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("wav2vec2", "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("whisper", "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xclip", "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xglm", "XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xlm", "XLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xlm-prophetnet", "XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xlm-roberta", "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xlnet", "XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xmod", "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("yolos", "YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("yoso", "YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ] +) + +MODEL_NAMES_MAPPING = OrderedDict( + [ + # Add full (and cased) model names here + ("albert", "ALBERT"), + ("align", "ALIGN"), + ("altclip", "AltCLIP"), + ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"), + ("autoformer", "Autoformer"), + 
("bark", "Bark"), + ("bart", "BART"), + ("barthez", "BARThez"), + ("bartpho", "BARTpho"), + ("beit", "BEiT"), + ("bert", "BERT"), + ("bert-generation", "Bert Generation"), + ("bert-japanese", "BertJapanese"), + ("bertweet", "BERTweet"), + ("big_bird", "BigBird"), + ("bigbird_pegasus", "BigBird-Pegasus"), + ("biogpt", "BioGpt"), + ("bit", "BiT"), + ("blenderbot", "Blenderbot"), + ("blenderbot-small", "BlenderbotSmall"), + ("blip", "BLIP"), + ("blip-2", "BLIP-2"), + ("bloom", "BLOOM"), + ("bort", "BORT"), + ("bridgetower", "BridgeTower"), + ("bros", "BROS"), + ("byt5", "ByT5"), + ("camembert", "CamemBERT"), + ("canine", "CANINE"), + ("chinese_clip", "Chinese-CLIP"), + ("clap", "CLAP"), + ("clip", "CLIP"), + ("clip_vision_model", "CLIPVisionModel"), + ("clipseg", "CLIPSeg"), + ("clvp", "CLVP"), + ("code_llama", "CodeLlama"), + ("codegen", "CodeGen"), + ("conditional_detr", "Conditional DETR"), + ("convbert", "ConvBERT"), + ("convnext", "ConvNeXT"), + ("convnextv2", "ConvNeXTV2"), + ("cpm", "CPM"), + ("cpmant", "CPM-Ant"), + ("ctrl", "CTRL"), + ("cvt", "CvT"), + ("data2vec-audio", "Data2VecAudio"), + ("data2vec-text", "Data2VecText"), + ("data2vec-vision", "Data2VecVision"), + ("deberta", "DeBERTa"), + ("deberta-v2", "DeBERTa-v2"), + ("decision_transformer", "Decision Transformer"), + ("deformable_detr", "Deformable DETR"), + ("deit", "DeiT"), + ("deplot", "DePlot"), + ("deta", "DETA"), + ("detr", "DETR"), + ("dialogpt", "DialoGPT"), + ("dinat", "DiNAT"), + ("dinov2", "DINOv2"), + ("distilbert", "DistilBERT"), + ("dit", "DiT"), + ("donut-swin", "DonutSwin"), + ("dpr", "DPR"), + ("dpt", "DPT"), + ("efficientformer", "EfficientFormer"), + ("efficientnet", "EfficientNet"), + ("electra", "ELECTRA"), + ("encodec", "EnCodec"), + ("encoder-decoder", "Encoder decoder"), + ("ernie", "ERNIE"), + ("ernie_m", "ErnieM"), + ("esm", "ESM"), + ("falcon", "Falcon"), + ("flan-t5", "FLAN-T5"), + ("flan-ul2", "FLAN-UL2"), + ("flaubert", "FlauBERT"), + ("flava", "FLAVA"), + ("fnet", "FNet"), + ("focalnet", "FocalNet"), + ("fsmt", "FairSeq Machine-Translation"), + ("funnel", "Funnel Transformer"), + ("fuyu", "Fuyu"), + ("git", "GIT"), + ("glpn", "GLPN"), + ("gpt-sw3", "GPT-Sw3"), + ("gpt2", "OpenAI GPT-2"), + ("gpt_bigcode", "GPTBigCode"), + ("gpt_neo", "GPT Neo"), + ("gpt_neox", "GPT NeoX"), + ("gpt_neox_japanese", "GPT NeoX Japanese"), + ("gptj", "GPT-J"), + ("gptsan-japanese", "GPTSAN-japanese"), + ("graphormer", "Graphormer"), + ("groupvit", "GroupViT"), + ("herbert", "HerBERT"), + ("hubert", "Hubert"), + ("ibert", "I-BERT"), + ("idefics", "IDEFICS"), + ("imagegpt", "ImageGPT"), + ("informer", "Informer"), + ("instructblip", "InstructBLIP"), + ("jukebox", "Jukebox"), + ("kosmos-2", "KOSMOS-2"), + ("layoutlm", "LayoutLM"), + ("layoutlmv2", "LayoutLMv2"), + ("layoutlmv3", "LayoutLMv3"), + ("layoutxlm", "LayoutXLM"), + ("led", "LED"), + ("levit", "LeViT"), + ("lilt", "LiLT"), + ("llama", "LLaMA"), + ("llama2", "Llama2"), + ("llava", "LLaVa"), + ("longformer", "Longformer"), + ("longt5", "LongT5"), + ("luke", "LUKE"), + ("lxmert", "LXMERT"), + ("m2m_100", "M2M100"), + ("madlad-400", "MADLAD-400"), + ("marian", "Marian"), + ("markuplm", "MarkupLM"), + ("mask2former", "Mask2Former"), + ("maskformer", "MaskFormer"), + ("maskformer-swin", "MaskFormerSwin"), + ("matcha", "MatCha"), + ("mbart", "mBART"), + ("mbart50", "mBART-50"), + ("mctct", "M-CTC-T"), + ("mega", "MEGA"), + ("megatron-bert", "Megatron-BERT"), + ("megatron_gpt2", "Megatron-GPT2"), + ("mgp-str", "MGP-STR"), + ("mistral", "Mistral"), + ("mixtral", 
"Mixtral"), + ("mluke", "mLUKE"), + ("mms", "MMS"), + ("mobilebert", "MobileBERT"), + ("mobilenet_v1", "MobileNetV1"), + ("mobilenet_v2", "MobileNetV2"), + ("mobilevit", "MobileViT"), + ("mobilevitv2", "MobileViTV2"), + ("mpnet", "MPNet"), + ("mpt", "MPT"), + ("mra", "MRA"), + ("mt5", "MT5"), + ("musicgen", "MusicGen"), + ("mvp", "MVP"), + ("nat", "NAT"), + ("nezha", "Nezha"), + ("nllb", "NLLB"), + ("nllb-moe", "NLLB-MOE"), + ("nougat", "Nougat"), + ("nystromformer", "Nyströmformer"), + ("oneformer", "OneFormer"), + ("open-llama", "OpenLlama"), + ("openai-gpt", "OpenAI GPT"), + ("opt", "OPT"), + ("owlv2", "OWLv2"), + ("owlvit", "OWL-ViT"), + ("patchtsmixer", "PatchTSMixer"), + ("patchtst", "PatchTST"), + ("pegasus", "Pegasus"), + ("pegasus_x", "PEGASUS-X"), + ("perceiver", "Perceiver"), + ("persimmon", "Persimmon"), + ("phi", "Phi"), + ("phobert", "PhoBERT"), + ("pix2struct", "Pix2Struct"), + ("plbart", "PLBart"), + ("poolformer", "PoolFormer"), + ("pop2piano", "Pop2Piano"), + ("prophetnet", "ProphetNet"), + ("pvt", "PVT"), + ("qdqbert", "QDQBert"), + ("rag", "RAG"), + ("realm", "REALM"), + ("reformer", "Reformer"), + ("regnet", "RegNet"), + ("rembert", "RemBERT"), + ("resnet", "ResNet"), + ("retribert", "RetriBERT"), + ("roberta", "RoBERTa"), + ("roberta-prelayernorm", "RoBERTa-PreLayerNorm"), + ("roc_bert", "RoCBert"), + ("roformer", "RoFormer"), + ("rwkv", "RWKV"), + ("sam", "SAM"), + ("seamless_m4t", "SeamlessM4T"), + ("seamless_m4t_v2", "SeamlessM4Tv2"), + ("segformer", "SegFormer"), + ("sew", "SEW"), + ("sew-d", "SEW-D"), + ("speech-encoder-decoder", "Speech Encoder decoder"), + ("speech_to_text", "Speech2Text"), + ("speech_to_text_2", "Speech2Text2"), + ("speecht5", "SpeechT5"), + ("splinter", "Splinter"), + ("squeezebert", "SqueezeBERT"), + ("swiftformer", "SwiftFormer"), + ("swin", "Swin Transformer"), + ("swin2sr", "Swin2SR"), + ("swinv2", "Swin Transformer V2"), + ("switch_transformers", "SwitchTransformers"), + ("t5", "T5"), + ("t5v1.1", "T5v1.1"), + ("table-transformer", "Table Transformer"), + ("tapas", "TAPAS"), + ("tapex", "TAPEX"), + ("time_series_transformer", "Time Series Transformer"), + ("timesformer", "TimeSformer"), + ("timm_backbone", "TimmBackbone"), + ("trajectory_transformer", "Trajectory Transformer"), + ("transfo-xl", "Transformer-XL"), + ("trocr", "TrOCR"), + ("tvlt", "TVLT"), + ("tvp", "TVP"), + ("ul2", "UL2"), + ("umt5", "UMT5"), + ("unispeech", "UniSpeech"), + ("unispeech-sat", "UniSpeechSat"), + ("univnet", "UnivNet"), + ("upernet", "UPerNet"), + ("van", "VAN"), + ("videomae", "VideoMAE"), + ("vilt", "ViLT"), + ("vipllava", "VipLlava"), + ("vision-encoder-decoder", "Vision Encoder decoder"), + ("vision-text-dual-encoder", "VisionTextDualEncoder"), + ("visual_bert", "VisualBERT"), + ("vit", "ViT"), + ("vit_hybrid", "ViT Hybrid"), + ("vit_mae", "ViTMAE"), + ("vit_msn", "ViTMSN"), + ("vitdet", "VitDet"), + ("vitmatte", "ViTMatte"), + ("vits", "VITS"), + ("vivit", "ViViT"), + ("wav2vec2", "Wav2Vec2"), + ("wav2vec2-conformer", "Wav2Vec2-Conformer"), + ("wav2vec2_phoneme", "Wav2Vec2Phoneme"), + ("wavlm", "WavLM"), + ("whisper", "Whisper"), + ("xclip", "X-CLIP"), + ("xglm", "XGLM"), + ("xlm", "XLM"), + ("xlm-prophetnet", "XLM-ProphetNet"), + ("xlm-roberta", "XLM-RoBERTa"), + ("xlm-roberta-xl", "XLM-RoBERTa-XL"), + ("xlm-v", "XLM-V"), + ("xlnet", "XLNet"), + ("xls_r", "XLS-R"), + ("xlsr_wav2vec2", "XLSR-Wav2Vec2"), + ("xmod", "X-MOD"), + ("yolos", "YOLOS"), + ("yoso", "YOSO"), + ] +) + +# This is tied to the processing `-` -> `_` in `model_type_to_module_name`. 
For example, instead of putting
+# `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`.
+DEPRECATED_MODELS = [
+    "bort",
+    "mctct",
+    "mmbt",
+    "open_llama",
+    "retribert",
+    "tapex",
+    "trajectory_transformer",
+    "transfo_xl",
+    "van",
+]
+
+SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict(
+    [
+        ("openai-gpt", "openai"),
+        ("data2vec-audio", "data2vec"),
+        ("data2vec-text", "data2vec"),
+        ("data2vec-vision", "data2vec"),
+        ("donut-swin", "donut"),
+        ("kosmos-2", "kosmos2"),
+        ("maskformer-swin", "maskformer"),
+        ("xclip", "x_clip"),
+        ("clip_vision_model", "clip"),
+    ]
+)
+
+
+def model_type_to_module_name(key):
+    """Converts a config key to the corresponding module."""
+    # Special treatment
+    if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
+        return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]
+
+    key = key.replace("-", "_")
+    if key in DEPRECATED_MODELS:
+        key = f"deprecated.{key}"
+
+    return key
+
+
+def config_class_to_model_type(config):
+    """Converts a config class name to the corresponding model type"""
+    for key, cls in CONFIG_MAPPING_NAMES.items():
+        if cls == config:
+            return key
+    # if key not found check in extra content
+    for key, cls in CONFIG_MAPPING._extra_content.items():
+        if cls.__name__ == config:
+            return key
+    return None
+
+
+class _LazyConfigMapping(OrderedDict):
+    """
+    A dictionary that lazily loads its values when they are requested.
+    """
+
+    def __init__(self, mapping):
+        self._mapping = mapping
+        self._extra_content = {}
+        self._modules = {}
+
+    def __getitem__(self, key):
+        if key in self._extra_content:
+            return self._extra_content[key]
+        if key not in self._mapping:
+            raise KeyError(key)
+        value = self._mapping[key]
+        module_name = model_type_to_module_name(key)
+        if module_name not in self._modules:
+            self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
+        if hasattr(self._modules[module_name], value):
+            return getattr(self._modules[module_name], value)
+
+        # Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the
+        # object at the top level.
+        transformers_module = importlib.import_module("transformers")
+        return getattr(transformers_module, value)
+
+    def keys(self):
+        return list(self._mapping.keys()) + list(self._extra_content.keys())
+
+    def values(self):
+        return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values())
+
+    def items(self):
+        return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items())
+
+    def __iter__(self):
+        return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
+
+    def __contains__(self, item):
+        return item in self._mapping or item in self._extra_content
+
+    def register(self, key, value, exist_ok=False):
+        """
+        Register a new configuration in this mapping.
+        """
+        if key in self._mapping.keys() and not exist_ok:
+            raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
+        self._extra_content[key] = value
+
+
+CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES)
+
+
+class _LazyLoadAllMappings(OrderedDict):
+    """
+    A mapping that will load all pairs of key values at the first access (either by indexing, requesting keys, values,
+    etc.)
+
+    Args:
+        mapping: The mapping to load.
+    """
+
+    def __init__(self, mapping):
+        self._mapping = mapping
+        self._initialized = False
+        self._data = {}
+
+    def _initialize(self):
+        if self._initialized:
+            return
+        warnings.warn(
+            "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP is deprecated and will be removed in v5 of Transformers. "
+            "It does not contain all available model checkpoints, far from it. Check out hf.co/models for that.",
+            FutureWarning,
+        )
+
+        for model_type, map_name in self._mapping.items():
+            module_name = model_type_to_module_name(model_type)
+            module = importlib.import_module(f".{module_name}", "transformers.models")
+            mapping = getattr(module, map_name)
+            self._data.update(mapping)
+
+        self._initialized = True
+
+    def __getitem__(self, key):
+        self._initialize()
+        return self._data[key]
+
+    def keys(self):
+        self._initialize()
+        return self._data.keys()
+
+    def values(self):
+        self._initialize()
+        return self._data.values()
+
+    def items(self):
+        self._initialize()
+        # Return the key/value pairs, not just the keys, as `items()` promises.
+        return self._data.items()
+
+    def __iter__(self):
+        self._initialize()
+        return iter(self._data)
+
+    def __contains__(self, item):
+        self._initialize()
+        return item in self._data
+
+
+ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES)
+
+
+def _get_class_name(model_class: Union[str, List[str]]):
+    if isinstance(model_class, (list, tuple)):
+        return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
+    return f"[`{model_class}`]"
+
+
+def _list_model_options(indent, config_to_class=None, use_model_types=True):
+    if config_to_class is None and not use_model_types:
+        raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.")
+    if use_model_types:
+        if config_to_class is None:
+            model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()}
+        else:
+            model_type_to_name = {
+                model_type: _get_class_name(model_class)
+                for model_type, model_class in config_to_class.items()
+                if model_type in MODEL_NAMES_MAPPING
+            }
+        lines = [
+            f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)"
+            for model_type in sorted(model_type_to_name.keys())
+        ]
+    else:
+        config_to_name = {
+            CONFIG_MAPPING_NAMES[config]: _get_class_name(clas)
+            for config, clas in config_to_class.items()
+            if config in CONFIG_MAPPING_NAMES
+        }
+        config_to_model_name = {
+            config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items()
+        }
+        lines = [
+            f"{indent}- [`{config_name}`] configuration class:"
+            f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)"
+            for config_name in sorted(config_to_name.keys())
+        ]
+    return "\n".join(lines)
+
+
+def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True):
+    def docstring_decorator(fn):
+        docstrings = fn.__doc__
+        lines = docstrings.split("\n")
+        i = 0
+        while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None:
+            i += 1
+        if i < len(lines):
+            indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0]
+            if use_model_types:
+                indent = f"{indent}    "
+            lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types)
+            docstrings = "\n".join(lines)
+        else:
+            raise ValueError(
+                f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current"
+                f" docstring is:\n{docstrings}"
+            )
+        fn.__doc__ = docstrings
+        return fn
+
+    return docstring_decorator
+
+
+class AutoConfig:
+    r"""
+    This is a generic 
configuration class that will be instantiated as one of the configuration classes of the library
+    when created with the [`~AutoConfig.from_pretrained`] class method.
+
+    This class cannot be instantiated directly using `__init__()` (throws an error).
+    """
+
+    def __init__(self):
+        raise EnvironmentError(
+            "AutoConfig is designed to be instantiated "
+            "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method."
+        )
+
+    @classmethod
+    def for_model(cls, model_type: str, *args, **kwargs):
+        if model_type in CONFIG_MAPPING:
+            config_class = CONFIG_MAPPING[model_type]
+            return config_class(*args, **kwargs)
+        raise ValueError(
+            f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}"
+        )
+
+    @classmethod
+    @replace_list_option_in_docstrings()
+    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+        r"""
+        Instantiate one of the configuration classes of the library from a pretrained model configuration.
+
+        The configuration class to instantiate is selected based on the `model_type` property of the config object that
+        is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+        List options
+
+        Args:
+            pretrained_model_name_or_path (`str` or `os.PathLike`):
+                Can be either:
+
+                - A string, the *model id* of a pretrained model configuration hosted inside a model repo on
+                  huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
+                  namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
+                - A path to a *directory* containing a configuration file saved using the
+                  [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
+                  e.g., `./my_model_directory/`.
+                - A path or url to a saved configuration JSON *file*, e.g.,
+                  `./my_model_directory/configuration.json`.
+            cache_dir (`str` or `os.PathLike`, *optional*):
+                Path to a directory in which a downloaded pretrained model configuration should be cached if the
+                standard cache should not be used.
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+                cached versions if they exist.
+            resume_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+                file exists.
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+                identifier allowed by git.
+            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+                If `False`, then this function returns just the final configuration object.
+
+                If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
+                dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
+                part of `kwargs` which has not been used to update `config` and is otherwise ignored.
+ trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs(additional keyword arguments, *optional*): + The values in kwargs of any keys which are configuration attributes will be used to override the loaded + values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled + by the `return_unused_kwargs` keyword parameter. + + Examples: + + ```python + >>> from transformers import AutoConfig + + >>> # Download configuration from huggingface.co and cache. + >>> config = AutoConfig.from_pretrained("bert-base-uncased") + + >>> # Download configuration from huggingface.co (user-uploaded) and cache. + >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased") + + >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*). + >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/") + + >>> # Load a specific configuration file. + >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json") + + >>> # Change some config attributes when loading a pretrained config. + >>> config = AutoConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) + >>> config.output_attentions + True + + >>> config, unused_kwargs = AutoConfig.from_pretrained( + ... "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True + ... ) + >>> config.output_attentions + True + + >>> unused_kwargs + {'foo': False} + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + kwargs["_from_auto"] = True + kwargs["name_or_path"] = pretrained_model_name_or_path + trust_remote_code = kwargs.pop("trust_remote_code", None) + code_revision = kwargs.pop("code_revision", None) + + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"] + has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + class_ref = config_dict["auto_map"]["AutoConfig"] + config_class = get_class_from_dynamic_module( + class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs + ) + if os.path.isdir(pretrained_model_name_or_path): + config_class.register_for_auto_class() + return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) + elif "model_type" in config_dict: + try: + config_class = CONFIG_MAPPING[config_dict["model_type"]] + except KeyError: + raise ValueError( + f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` " + "but Transformers does not recognize this architecture. 
This could be because of an " + "issue with the checkpoint, or because your version of Transformers is out of date." + ) + return config_class.from_dict(config_dict, **unused_kwargs) + else: + # Fallback: use pattern matching on the string. + # We go from longer names to shorter names to catch roberta before bert (for instance) + for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True): + if pattern in str(pretrained_model_name_or_path): + return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs) + + raise ValueError( + f"Unrecognized model in {pretrained_model_name_or_path}. " + f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings " + f"in its name: {', '.join(CONFIG_MAPPING.keys())}" + ) + + @staticmethod + def register(model_type, config, exist_ok=False): + """ + Register a new configuration for this class. + + Args: + model_type (`str`): The model type like "bert" or "gpt". + config ([`PretrainedConfig`]): The config to register. + """ + if issubclass(config, PretrainedConfig) and config.model_type != model_type: + raise ValueError( + "The config you are passing has a `model_type` attribute that is not consistent with the model type " + f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they " + "match!" + ) + CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok) diff --git a/modified/models/auto/feature_extraction_auto.py b/modified/models/auto/feature_extraction_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..457217566e7cfa00c0587db785b7317e2ea77d62 --- /dev/null +++ b/modified/models/auto/feature_extraction_auto.py @@ -0,0 +1,397 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
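+# This module mirrors the other auto modules: a lazy name mapping
+# (`FEATURE_EXTRACTOR_MAPPING_NAMES` below) is wrapped in `_LazyAutoMapping` so that
+# concrete classes are only imported on first access. A minimal usage sketch
+# (the checkpoint name is illustrative only):
+#
+#     from transformers import AutoFeatureExtractor
+#
+#     # Resolves the checkpoint's `model_type`, then lazily imports and
+#     # instantiates the matching feature extractor class.
+#     fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")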
+""" AutoFeatureExtractor class.""" +import importlib +import json +import os +import warnings +from collections import OrderedDict +from typing import Dict, Optional, Union + +# Build the list of all feature extractors +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...feature_extraction_utils import FeatureExtractionMixin +from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + model_type_to_module_name, + replace_list_option_in_docstrings, +) + + +logger = logging.get_logger(__name__) + +FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict( + [ + ("audio-spectrogram-transformer", "ASTFeatureExtractor"), + ("beit", "BeitFeatureExtractor"), + ("chinese_clip", "ChineseCLIPFeatureExtractor"), + ("clap", "ClapFeatureExtractor"), + ("clip", "CLIPFeatureExtractor"), + ("clipseg", "ViTFeatureExtractor"), + ("clvp", "ClvpFeatureExtractor"), + ("conditional_detr", "ConditionalDetrFeatureExtractor"), + ("convnext", "ConvNextFeatureExtractor"), + ("cvt", "ConvNextFeatureExtractor"), + ("data2vec-audio", "Wav2Vec2FeatureExtractor"), + ("data2vec-vision", "BeitFeatureExtractor"), + ("deformable_detr", "DeformableDetrFeatureExtractor"), + ("deit", "DeiTFeatureExtractor"), + ("detr", "DetrFeatureExtractor"), + ("dinat", "ViTFeatureExtractor"), + ("donut-swin", "DonutFeatureExtractor"), + ("dpt", "DPTFeatureExtractor"), + ("encodec", "EncodecFeatureExtractor"), + ("flava", "FlavaFeatureExtractor"), + ("glpn", "GLPNFeatureExtractor"), + ("groupvit", "CLIPFeatureExtractor"), + ("hubert", "Wav2Vec2FeatureExtractor"), + ("imagegpt", "ImageGPTFeatureExtractor"), + ("layoutlmv2", "LayoutLMv2FeatureExtractor"), + ("layoutlmv3", "LayoutLMv3FeatureExtractor"), + ("levit", "LevitFeatureExtractor"), + ("maskformer", "MaskFormerFeatureExtractor"), + ("mctct", "MCTCTFeatureExtractor"), + ("mobilenet_v1", "MobileNetV1FeatureExtractor"), + ("mobilenet_v2", "MobileNetV2FeatureExtractor"), + ("mobilevit", "MobileViTFeatureExtractor"), + ("nat", "ViTFeatureExtractor"), + ("owlvit", "OwlViTFeatureExtractor"), + ("perceiver", "PerceiverFeatureExtractor"), + ("poolformer", "PoolFormerFeatureExtractor"), + ("pop2piano", "Pop2PianoFeatureExtractor"), + ("regnet", "ConvNextFeatureExtractor"), + ("resnet", "ConvNextFeatureExtractor"), + ("seamless_m4t", "SeamlessM4TFeatureExtractor"), + ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"), + ("segformer", "SegformerFeatureExtractor"), + ("sew", "Wav2Vec2FeatureExtractor"), + ("sew-d", "Wav2Vec2FeatureExtractor"), + ("speech_to_text", "Speech2TextFeatureExtractor"), + ("speecht5", "SpeechT5FeatureExtractor"), + ("swiftformer", "ViTFeatureExtractor"), + ("swin", "ViTFeatureExtractor"), + ("swinv2", "ViTFeatureExtractor"), + ("table-transformer", "DetrFeatureExtractor"), + ("timesformer", "VideoMAEFeatureExtractor"), + ("tvlt", "TvltFeatureExtractor"), + ("unispeech", "Wav2Vec2FeatureExtractor"), + ("unispeech-sat", "Wav2Vec2FeatureExtractor"), + ("univnet", "UnivNetFeatureExtractor"), + ("van", "ConvNextFeatureExtractor"), + ("videomae", "VideoMAEFeatureExtractor"), + ("vilt", "ViltFeatureExtractor"), + ("vit", "ViTFeatureExtractor"), + ("vit_mae", "ViTFeatureExtractor"), + ("vit_msn", "ViTFeatureExtractor"), + ("wav2vec2", "Wav2Vec2FeatureExtractor"), + ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), + ("wavlm", 
"Wav2Vec2FeatureExtractor"), + ("whisper", "WhisperFeatureExtractor"), + ("xclip", "CLIPFeatureExtractor"), + ("yolos", "YolosFeatureExtractor"), + ] +) + +FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) + + +def feature_extractor_class_from_name(class_name: str): + for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): + if class_name in extractors: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): + if getattr(extractor, "__name__", None) == class_name: + return extractor + + # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +def get_feature_extractor_config( + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + **kwargs, +): + """ + Loads the tokenizer configuration from a pretrained model tokenizer configuration. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. 
+ + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Dict`: The configuration of the feature extractor. + + Examples: + + ```python + # Download configuration from huggingface.co and cache. + feature_extractor_config = get_feature_extractor_config("facebook/wav2vec2-base-960h") + # This model does not have a feature extractor config so the result will be an empty dict. + feature_extractor_config = get_feature_extractor_config("bert-base-uncased") + + # Save a pretrained feature extractor locally and you can reload its config + from transformers import AutoFeatureExtractor + + feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") + feature_extractor.save_pretrained("feature-extractor-test") + feature_extractor_config = get_feature_extractor_config("feature-extractor-test") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + resolved_config_file = get_file_from_repo( + pretrained_model_name_or_path, + FEATURE_EXTRACTOR_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + if resolved_config_file is None: + logger.info( + "Could not locate the feature extractor configuration file, will try to use the model config instead." + ) + return {} + + with open(resolved_config_file, encoding="utf-8") as reader: + return json.load(reader) + + +class AutoFeatureExtractor: + r""" + This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the + library when created with the [`AutoFeatureExtractor.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoFeatureExtractor is designed to be instantiated " + "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the feature extractor classes of the library from a pretrained model feature extractor + configuration. + + The feature extractor class to instantiate is selected based on the `model_type` property of the config object + (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's + missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or + namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a feature extractor file saved using the + [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., + `./my_model_directory/`. + - a path or url to a saved feature extractor JSON *file*, e.g., + `./my_model_directory/preprocessor_config.json`. 
+ cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model feature extractor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force (re-)downloading the feature extractor files and override the cached versions + if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final feature extractor object. If `True`, then this + function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of + `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are feature extractor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + + + Passing `token=True` is required when you want to use a private model. + + + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor + + >>> # Download feature extractor from huggingface.co and cache. + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") + + >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*) + >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
+ ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + + config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) + feature_extractor_class = config_dict.get("feature_extractor_type", None) + feature_extractor_auto_map = None + if "AutoFeatureExtractor" in config_dict.get("auto_map", {}): + feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"] + + # If we don't find the feature extractor class in the feature extractor config, let's try the model config. + if feature_extractor_class is None and feature_extractor_auto_map is None: + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + # It could be in `config.feature_extractor_type` + feature_extractor_class = getattr(config, "feature_extractor_type", None) + if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map: + feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"] + + if feature_extractor_class is not None: + feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class) + + has_remote_code = feature_extractor_auto_map is not None + has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + feature_extractor_class = get_class_from_dynamic_module( + feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs + ) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + feature_extractor_class.register_for_auto_class() + return feature_extractor_class.from_dict(config_dict, **kwargs) + elif feature_extractor_class is not None: + return feature_extractor_class.from_dict(config_dict, **kwargs) + # Last try: we use the FEATURE_EXTRACTOR_MAPPING. + elif type(config) in FEATURE_EXTRACTOR_MAPPING: + feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)] + return feature_extractor_class.from_dict(config_dict, **kwargs) + + raise ValueError( + f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a " + f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} or {CONFIG_NAME}, or one of the following " + f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}" + ) + + @staticmethod + def register(config_class, feature_extractor_class, exist_ok=False): + """ + Register a new feature extractor for this class. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + feature_extractor_class ([`FeatureExtractionMixin`]): The feature extractor to register. + """ + FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok) diff --git a/modified/models/auto/image_processing_auto.py b/modified/models/auto/image_processing_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..446c9adf1b6dc3b23870868ef1c73eb39b8794a7 --- /dev/null +++ b/modified/models/auto/image_processing_auto.py @@ -0,0 +1,427 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" AutoImageProcessor class.""" +import importlib +import json +import os +import warnings +from collections import OrderedDict +from typing import Dict, Optional, Union + +# Build the list of all image processors +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...image_processing_utils import ImageProcessingMixin +from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + model_type_to_module_name, + replace_list_option_in_docstrings, +) + + +logger = logging.get_logger(__name__) + +IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict( + [ + ("align", "EfficientNetImageProcessor"), + ("beit", "BeitImageProcessor"), + ("bit", "BitImageProcessor"), + ("blip", "BlipImageProcessor"), + ("blip-2", "BlipImageProcessor"), + ("bridgetower", "BridgeTowerImageProcessor"), + ("chinese_clip", "ChineseCLIPImageProcessor"), + ("clip", "CLIPImageProcessor"), + ("clipseg", "ViTImageProcessor"), + ("conditional_detr", "ConditionalDetrImageProcessor"), + ("convnext", "ConvNextImageProcessor"), + ("convnextv2", "ConvNextImageProcessor"), + ("cvt", "ConvNextImageProcessor"), + ("data2vec-vision", "BeitImageProcessor"), + ("deformable_detr", "DeformableDetrImageProcessor"), + ("deit", "DeiTImageProcessor"), + ("deta", "DetaImageProcessor"), + ("detr", "DetrImageProcessor"), + ("dinat", "ViTImageProcessor"), + ("dinov2", "BitImageProcessor"), + ("donut-swin", "DonutImageProcessor"), + ("dpt", "DPTImageProcessor"), + ("efficientformer", "EfficientFormerImageProcessor"), + ("efficientnet", "EfficientNetImageProcessor"), + ("flava", "FlavaImageProcessor"), + ("focalnet", "BitImageProcessor"), + ("fuyu", "FuyuImageProcessor"), + ("git", "CLIPImageProcessor"), + ("glpn", "GLPNImageProcessor"), + ("groupvit", "CLIPImageProcessor"), + ("idefics", "IdeficsImageProcessor"), + ("imagegpt", "ImageGPTImageProcessor"), + ("instructblip", "BlipImageProcessor"), + ("kosmos-2", "CLIPImageProcessor"), + ("layoutlmv2", "LayoutLMv2ImageProcessor"), + ("layoutlmv3", "LayoutLMv3ImageProcessor"), + ("levit", "LevitImageProcessor"), + ("llava", "CLIPImageProcessor"), + ("mask2former", "Mask2FormerImageProcessor"), + ("maskformer", "MaskFormerImageProcessor"), + ("mgp-str", "ViTImageProcessor"), + ("mobilenet_v1", "MobileNetV1ImageProcessor"), + ("mobilenet_v2", "MobileNetV2ImageProcessor"), + ("mobilevit", "MobileViTImageProcessor"), + ("mobilevitv2", "MobileViTImageProcessor"), + ("nat", "ViTImageProcessor"), + ("nougat", "NougatImageProcessor"), + ("oneformer", "OneFormerImageProcessor"), + ("owlv2", "Owlv2ImageProcessor"), + ("owlvit", "OwlViTImageProcessor"), + ("perceiver", "PerceiverImageProcessor"), + ("pix2struct", "Pix2StructImageProcessor"), + ("poolformer", "PoolFormerImageProcessor"), + ("pvt", 
"PvtImageProcessor"), + ("regnet", "ConvNextImageProcessor"), + ("resnet", "ConvNextImageProcessor"), + ("sam", "SamImageProcessor"), + ("segformer", "SegformerImageProcessor"), + ("swiftformer", "ViTImageProcessor"), + ("swin", "ViTImageProcessor"), + ("swin2sr", "Swin2SRImageProcessor"), + ("swinv2", "ViTImageProcessor"), + ("table-transformer", "DetrImageProcessor"), + ("timesformer", "VideoMAEImageProcessor"), + ("tvlt", "TvltImageProcessor"), + ("tvp", "TvpImageProcessor"), + ("upernet", "SegformerImageProcessor"), + ("van", "ConvNextImageProcessor"), + ("videomae", "VideoMAEImageProcessor"), + ("vilt", "ViltImageProcessor"), + ("vipllava", "CLIPImageProcessor"), + ("vit", "ViTImageProcessor"), + ("vit_hybrid", "ViTHybridImageProcessor"), + ("vit_mae", "ViTImageProcessor"), + ("vit_msn", "ViTImageProcessor"), + ("vitmatte", "VitMatteImageProcessor"), + ("xclip", "CLIPImageProcessor"), + ("yolos", "YolosImageProcessor"), + ] +) + +IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) + + +def image_processor_class_from_name(class_name: str): + for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): + if class_name in extractors: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): + if getattr(extractor, "__name__", None) == class_name: + return extractor + + # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +def get_image_processor_config( + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + **kwargs, +): + """ + Loads the image processor configuration from a pretrained model image processor configuration. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. 
+ proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the image processor configuration from local files. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Dict`: The configuration of the image processor. + + Examples: + + ```python + # Download configuration from huggingface.co and cache. + image_processor_config = get_image_processor_config("google/vit-base-patch16-224-in21k") + # This model does not have an image processor config so the result will be an empty dict. + image_processor_config = get_image_processor_config("xlm-roberta-base") + + # Save a pretrained image processor locally and you can reload its config + from transformers import AutoImageProcessor + + image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + image_processor.save_pretrained("image-processor-test") + image_processor_config = get_image_processor_config("image-processor-test") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + resolved_config_file = get_file_from_repo( + pretrained_model_name_or_path, + IMAGE_PROCESSOR_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + if resolved_config_file is None: + logger.info( + "Could not locate the image processor configuration file, will try to use the model config instead." + ) + return {} + + with open(resolved_config_file, encoding="utf-8") as reader: + return json.load(reader) + + +class AutoImageProcessor: + r""" + This is a generic image processor class that will be instantiated as one of the image processor classes of the + library when created with the [`AutoImageProcessor.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoImageProcessor is designed to be instantiated " + "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the image processor classes of the library from a pretrained model image processor + configuration. 
+ + The image processor class to instantiate is selected based on the `model_type` property of the config object + (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's + missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained image_processor hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or + namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing an image processor file saved using the + [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., + `./my_model_directory/`. + - a path or url to a saved image processor JSON *file*, e.g., + `./my_model_directory/preprocessor_config.json`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model image processor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force (re-)downloading the image processor files and override the cached versions if + they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final image processor object. If `True`, then this + function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of + `kwargs` which has not been used to update `image_processor` and is otherwise ignored. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are image processor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + + + Passing `token=True` is required when you want to use a private model. 
+ + + + Examples: + + ```python + >>> from transformers import AutoImageProcessor + + >>> # Download image processor from huggingface.co and cache. + >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + + >>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*) + >>> # image_processor = AutoImageProcessor.from_pretrained("./test/saved_model/") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + + config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) + image_processor_class = config_dict.get("image_processor_type", None) + image_processor_auto_map = None + if "AutoImageProcessor" in config_dict.get("auto_map", {}): + image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"] + + # If we still don't have the image processor class, check if we're loading from a previous feature extractor config + # and if so, infer the image processor class from there. + if image_processor_class is None and image_processor_auto_map is None: + feature_extractor_class = config_dict.pop("feature_extractor_type", None) + if feature_extractor_class is not None: + logger.warning( + "Could not find image processor class in the image processor config or the model config. Loading" + " based on pattern matching with the model's feature extractor configuration." + ) + image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor") + if "AutoFeatureExtractor" in config_dict.get("auto_map", {}): + feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"] + image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor") + logger.warning( + "Could not find image processor auto map in the image processor config or the model config." + " Loading based on pattern matching with the model's feature extractor configuration." + ) + + # If we don't find the image processor class in the image processor config, let's try the model config. 
+ if image_processor_class is None and image_processor_auto_map is None: + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + # It could be in `config.image_processor_type` + image_processor_class = getattr(config, "image_processor_type", None) + if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map: + image_processor_auto_map = config.auto_map["AutoImageProcessor"] + + if image_processor_class is not None: + image_processor_class = image_processor_class_from_name(image_processor_class) + + has_remote_code = image_processor_auto_map is not None + has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + image_processor_class = get_class_from_dynamic_module( + image_processor_auto_map, pretrained_model_name_or_path, **kwargs + ) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + image_processor_class.register_for_auto_class() + return image_processor_class.from_dict(config_dict, **kwargs) + elif image_processor_class is not None: + return image_processor_class.from_dict(config_dict, **kwargs) + # Last try: we use the IMAGE_PROCESSOR_MAPPING. + elif type(config) in IMAGE_PROCESSOR_MAPPING: + image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)] + return image_processor_class.from_dict(config_dict, **kwargs) + + raise ValueError( + f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have an " + f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} or {CONFIG_NAME}, or one of the following " + f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}" + ) + + @staticmethod + def register(config_class, image_processor_class, exist_ok=False): + """ + Register a new image processor for this class. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + image_processor_class ([`ImageProcessingMixin`]): The image processor to register. + """ + IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class, exist_ok=exist_ok) diff --git a/modified/models/auto/modeling_auto.py b/modified/models/auto/modeling_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..9978b1353035e38f8c0c874e49ad9d2fdc87c653 --- /dev/null +++ b/modified/models/auto/modeling_auto.py @@ -0,0 +1,1573 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
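+# A minimal usage sketch of how the name mappings below are consumed, assuming a standard transformers install +# (the checkpoint names are illustrative): each Auto class looks up the checkpoint config's `model_type` in its +# `_LazyAutoMapping` and instantiates the matching concrete class. +# +#     >>> from transformers import AutoModel, AutoModelForCausalLM +#     >>> model = AutoModel.from_pretrained("bert-base-uncased")  # "bert" -> BertModel via MODEL_MAPPING_NAMES +#     >>> lm = AutoModelForCausalLM.from_pretrained("gpt2")  # "gpt2" -> GPT2LMHeadModel via MODEL_FOR_CAUSAL_LM_MAPPING_NAMES 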
+""" Auto Model class.""" + +import warnings +from collections import OrderedDict + +from ...utils import logging +from .auto_factory import ( + _BaseAutoBackboneClass, + _BaseAutoModelClass, + _LazyAutoMapping, + auto_class_update, +) +from .configuration_auto import CONFIG_MAPPING_NAMES + + +logger = logging.get_logger(__name__) + + +MODEL_MAPPING_NAMES = OrderedDict( + [ + # Base model mapping + ("albert", "AlbertModel"), + ("align", "AlignModel"), + ("altclip", "AltCLIPModel"), + ("audio-spectrogram-transformer", "ASTModel"), + ("autoformer", "AutoformerModel"), + ("bark", "BarkModel"), + ("bart", "BartModel"), + ("beit", "BeitModel"), + ("bert", "BertModel"), + ("bert-generation", "BertGenerationEncoder"), + ("big_bird", "BigBirdModel"), + ("bigbird_pegasus", "BigBirdPegasusModel"), + ("biogpt", "BioGptModel"), + ("bit", "BitModel"), + ("blenderbot", "BlenderbotModel"), + ("blenderbot-small", "BlenderbotSmallModel"), + ("blip", "BlipModel"), + ("blip-2", "Blip2Model"), + ("bloom", "BloomModel"), + ("bridgetower", "BridgeTowerModel"), + ("bros", "BrosModel"), + ("camembert", "CamembertModel"), + ("canine", "CanineModel"), + ("chinese_clip", "ChineseCLIPModel"), + ("clap", "ClapModel"), + ("clip", "CLIPModel"), + ("clip_vision_model", "CLIPVisionModel"), + ("clipseg", "CLIPSegModel"), + ("clvp", "ClvpModelForConditionalGeneration"), + ("code_llama", "LlamaModel"), + ("codegen", "CodeGenModel"), + ("conditional_detr", "ConditionalDetrModel"), + ("convbert", "ConvBertModel"), + ("convnext", "ConvNextModel"), + ("convnextv2", "ConvNextV2Model"), + ("cpmant", "CpmAntModel"), + ("ctrl", "CTRLModel"), + ("cvt", "CvtModel"), + ("data2vec-audio", "Data2VecAudioModel"), + ("data2vec-text", "Data2VecTextModel"), + ("data2vec-vision", "Data2VecVisionModel"), + ("deberta", "DebertaModel"), + ("deberta-v2", "DebertaV2Model"), + ("decision_transformer", "DecisionTransformerModel"), + ("deformable_detr", "DeformableDetrModel"), + ("deit", "DeiTModel"), + ("deta", "DetaModel"), + ("detr", "DetrModel"), + ("dinat", "DinatModel"), + ("dinov2", "Dinov2Model"), + ("distilbert", "DistilBertModel"), + ("donut-swin", "DonutSwinModel"), + ("dpr", "DPRQuestionEncoder"), + ("dpt", "DPTModel"), + ("efficientformer", "EfficientFormerModel"), + ("efficientnet", "EfficientNetModel"), + ("electra", "ElectraModel"), + ("encodec", "EncodecModel"), + ("ernie", "ErnieModel"), + ("ernie_m", "ErnieMModel"), + ("esm", "EsmModel"), + ("falcon", "FalconModel"), + ("flaubert", "FlaubertModel"), + ("flava", "FlavaModel"), + ("fnet", "FNetModel"), + ("focalnet", "FocalNetModel"), + ("fsmt", "FSMTModel"), + ("funnel", ("FunnelModel", "FunnelBaseModel")), + ("git", "GitModel"), + ("glpn", "GLPNModel"), + ("gpt-sw3", "GPT2Model"), + ("gpt2", "GPT2Model"), + ("gpt_bigcode", "GPTBigCodeModel"), + ("gpt_neo", "GPTNeoModel"), + ("gpt_neox", "GPTNeoXModel"), + ("gpt_neox_japanese", "GPTNeoXJapaneseModel"), + ("gptj", "GPTJModel"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("graphormer", "GraphormerModel"), + ("groupvit", "GroupViTModel"), + ("hubert", "HubertModel"), + ("ibert", "IBertModel"), + ("idefics", "IdeficsModel"), + ("imagegpt", "ImageGPTModel"), + ("informer", "InformerModel"), + ("jukebox", "JukeboxModel"), + ("kosmos-2", "Kosmos2Model"), + ("layoutlm", "LayoutLMModel"), + ("layoutlmv2", "LayoutLMv2Model"), + ("layoutlmv3", "LayoutLMv3Model"), + ("led", "LEDModel"), + ("levit", "LevitModel"), + ("lilt", "LiltModel"), + ("llama", "LlamaModel"), + ("longformer", "LongformerModel"), + ("longt5", 
"LongT5Model"), + ("luke", "LukeModel"), + ("lxmert", "LxmertModel"), + ("m2m_100", "M2M100Model"), + ("marian", "MarianModel"), + ("markuplm", "MarkupLMModel"), + ("mask2former", "Mask2FormerModel"), + ("maskformer", "MaskFormerModel"), + ("maskformer-swin", "MaskFormerSwinModel"), + ("mbart", "MBartModel"), + ("mctct", "MCTCTModel"), + ("mega", "MegaModel"), + ("megatron-bert", "MegatronBertModel"), + ("mgp-str", "MgpstrForSceneTextRecognition"), + ("mistral", "MistralModel"), + ("mixtral", "MixtralModel"), + ("mobilebert", "MobileBertModel"), + ("mobilenet_v1", "MobileNetV1Model"), + ("mobilenet_v2", "MobileNetV2Model"), + ("mobilevit", "MobileViTModel"), + ("mobilevitv2", "MobileViTV2Model"), + ("mpnet", "MPNetModel"), + ("mpt", "MptModel"), + ("mra", "MraModel"), + ("mt5", "MT5Model"), + ("mvp", "MvpModel"), + ("nat", "NatModel"), + ("nezha", "NezhaModel"), + ("nllb-moe", "NllbMoeModel"), + ("nystromformer", "NystromformerModel"), + ("oneformer", "OneFormerModel"), + ("open-llama", "OpenLlamaModel"), + ("openai-gpt", "OpenAIGPTModel"), + ("opt", "OPTModel"), + ("owlv2", "Owlv2Model"), + ("owlvit", "OwlViTModel"), + ("patchtsmixer", "PatchTSMixerModel"), + ("patchtst", "PatchTSTModel"), + ("pegasus", "PegasusModel"), + ("pegasus_x", "PegasusXModel"), + ("perceiver", "PerceiverModel"), + ("persimmon", "PersimmonModel"), + ("phi", "PhiModel"), + ("plbart", "PLBartModel"), + ("poolformer", "PoolFormerModel"), + ("prophetnet", "ProphetNetModel"), + ("pvt", "PvtModel"), + ("qdqbert", "QDQBertModel"), + ("reformer", "ReformerModel"), + ("regnet", "RegNetModel"), + ("rembert", "RemBertModel"), + ("resnet", "ResNetModel"), + ("retribert", "RetriBertModel"), + ("roberta", "RobertaModel"), + ("roberta-prelayernorm", "RobertaPreLayerNormModel"), + ("roc_bert", "RoCBertModel"), + ("roformer", "RoFormerModel"), + ("rwkv", "RwkvModel"), + ("sam", "SamModel"), + ("seamless_m4t", "SeamlessM4TModel"), + ("seamless_m4t_v2", "SeamlessM4Tv2Model"), + ("segformer", "SegformerModel"), + ("sew", "SEWModel"), + ("sew-d", "SEWDModel"), + ("speech_to_text", "Speech2TextModel"), + ("speecht5", "SpeechT5Model"), + ("splinter", "SplinterModel"), + ("squeezebert", "SqueezeBertModel"), + ("swiftformer", "SwiftFormerModel"), + ("swin", "SwinModel"), + ("swin2sr", "Swin2SRModel"), + ("swinv2", "Swinv2Model"), + ("switch_transformers", "SwitchTransformersModel"), + ("t5", "T5Model"), + ("table-transformer", "TableTransformerModel"), + ("tapas", "TapasModel"), + ("time_series_transformer", "TimeSeriesTransformerModel"), + ("timesformer", "TimesformerModel"), + ("timm_backbone", "TimmBackbone"), + ("trajectory_transformer", "TrajectoryTransformerModel"), + ("transfo-xl", "TransfoXLModel"), + ("tvlt", "TvltModel"), + ("tvp", "TvpModel"), + ("umt5", "UMT5Model"), + ("unispeech", "UniSpeechModel"), + ("unispeech-sat", "UniSpeechSatModel"), + ("univnet", "UnivNetModel"), + ("van", "VanModel"), + ("videomae", "VideoMAEModel"), + ("vilt", "ViltModel"), + ("vision-text-dual-encoder", "VisionTextDualEncoderModel"), + ("visual_bert", "VisualBertModel"), + ("vit", "ViTModel"), + ("vit_hybrid", "ViTHybridModel"), + ("vit_mae", "ViTMAEModel"), + ("vit_msn", "ViTMSNModel"), + ("vitdet", "VitDetModel"), + ("vits", "VitsModel"), + ("vivit", "VivitModel"), + ("wav2vec2", "Wav2Vec2Model"), + ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), + ("wavlm", "WavLMModel"), + ("whisper", "WhisperModel"), + ("xclip", "XCLIPModel"), + ("xglm", "XGLMModel"), + ("xlm", "XLMModel"), + ("xlm-prophetnet", "XLMProphetNetModel"), + ("xlm-roberta", 
"XLMRobertaModel"), + ("xlm-roberta-xl", "XLMRobertaXLModel"), + ("xlnet", "XLNetModel"), + ("xmod", "XmodModel"), + ("yolos", "YolosModel"), + ("yoso", "YosoModel"), + ] +) + +MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( + [ + # Model for pre-training mapping + ("albert", "AlbertForPreTraining"), + ("bart", "BartForConditionalGeneration"), + ("bert", "BertForPreTraining"), + ("big_bird", "BigBirdForPreTraining"), + ("bloom", "BloomForCausalLM"), + ("camembert", "CamembertForMaskedLM"), + ("ctrl", "CTRLLMHeadModel"), + ("data2vec-text", "Data2VecTextForMaskedLM"), + ("deberta", "DebertaForMaskedLM"), + ("deberta-v2", "DebertaV2ForMaskedLM"), + ("distilbert", "DistilBertForMaskedLM"), + ("electra", "ElectraForPreTraining"), + ("ernie", "ErnieForPreTraining"), + ("flaubert", "FlaubertWithLMHeadModel"), + ("flava", "FlavaForPreTraining"), + ("fnet", "FNetForPreTraining"), + ("fsmt", "FSMTForConditionalGeneration"), + ("funnel", "FunnelForPreTraining"), + ("gpt-sw3", "GPT2LMHeadModel"), + ("gpt2", "GPT2LMHeadModel"), + ("gpt_bigcode", "GPTBigCodeForCausalLM"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("ibert", "IBertForMaskedLM"), + ("idefics", "IdeficsForVisionText2Text"), + ("layoutlm", "LayoutLMForMaskedLM"), + ("llava", "LlavaForConditionalGeneration"), + ("longformer", "LongformerForMaskedLM"), + ("luke", "LukeForMaskedLM"), + ("lxmert", "LxmertForPreTraining"), + ("mega", "MegaForMaskedLM"), + ("megatron-bert", "MegatronBertForPreTraining"), + ("mobilebert", "MobileBertForPreTraining"), + ("mpnet", "MPNetForMaskedLM"), + ("mpt", "MptForCausalLM"), + ("mra", "MraForMaskedLM"), + ("mvp", "MvpForConditionalGeneration"), + ("nezha", "NezhaForPreTraining"), + ("nllb-moe", "NllbMoeForConditionalGeneration"), + ("openai-gpt", "OpenAIGPTLMHeadModel"), + ("retribert", "RetriBertModel"), + ("roberta", "RobertaForMaskedLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), + ("roc_bert", "RoCBertForPreTraining"), + ("rwkv", "RwkvForCausalLM"), + ("splinter", "SplinterForPreTraining"), + ("squeezebert", "SqueezeBertForMaskedLM"), + ("switch_transformers", "SwitchTransformersForConditionalGeneration"), + ("t5", "T5ForConditionalGeneration"), + ("tapas", "TapasForMaskedLM"), + ("transfo-xl", "TransfoXLLMHeadModel"), + ("tvlt", "TvltForPreTraining"), + ("unispeech", "UniSpeechForPreTraining"), + ("unispeech-sat", "UniSpeechSatForPreTraining"), + ("videomae", "VideoMAEForPreTraining"), + ("vipllava", "VipLlavaForConditionalGeneration"), + ("visual_bert", "VisualBertForPreTraining"), + ("vit_mae", "ViTMAEForPreTraining"), + ("wav2vec2", "Wav2Vec2ForPreTraining"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-roberta", "XLMRobertaForMaskedLM"), + ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), + ("xlnet", "XLNetLMHeadModel"), + ("xmod", "XmodForMaskedLM"), + ] +) + +MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict( + [ + # Model with LM heads mapping + ("albert", "AlbertForMaskedLM"), + ("bart", "BartForConditionalGeneration"), + ("bert", "BertForMaskedLM"), + ("big_bird", "BigBirdForMaskedLM"), + ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), + ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), + ("bloom", "BloomForCausalLM"), + ("camembert", "CamembertForMaskedLM"), + ("codegen", "CodeGenForCausalLM"), + ("convbert", "ConvBertForMaskedLM"), + ("cpmant", "CpmAntForCausalLM"), + ("ctrl", "CTRLLMHeadModel"), + ("data2vec-text", "Data2VecTextForMaskedLM"), + ("deberta", 
"DebertaForMaskedLM"), + ("deberta-v2", "DebertaV2ForMaskedLM"), + ("distilbert", "DistilBertForMaskedLM"), + ("electra", "ElectraForMaskedLM"), + ("encoder-decoder", "EncoderDecoderModel"), + ("ernie", "ErnieForMaskedLM"), + ("esm", "EsmForMaskedLM"), + ("flaubert", "FlaubertWithLMHeadModel"), + ("fnet", "FNetForMaskedLM"), + ("fsmt", "FSMTForConditionalGeneration"), + ("funnel", "FunnelForMaskedLM"), + ("git", "GitForCausalLM"), + ("gpt-sw3", "GPT2LMHeadModel"), + ("gpt2", "GPT2LMHeadModel"), + ("gpt_bigcode", "GPTBigCodeForCausalLM"), + ("gpt_neo", "GPTNeoForCausalLM"), + ("gpt_neox", "GPTNeoXForCausalLM"), + ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), + ("gptj", "GPTJForCausalLM"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("ibert", "IBertForMaskedLM"), + ("layoutlm", "LayoutLMForMaskedLM"), + ("led", "LEDForConditionalGeneration"), + ("longformer", "LongformerForMaskedLM"), + ("longt5", "LongT5ForConditionalGeneration"), + ("luke", "LukeForMaskedLM"), + ("m2m_100", "M2M100ForConditionalGeneration"), + ("marian", "MarianMTModel"), + ("mega", "MegaForMaskedLM"), + ("megatron-bert", "MegatronBertForCausalLM"), + ("mobilebert", "MobileBertForMaskedLM"), + ("mpnet", "MPNetForMaskedLM"), + ("mpt", "MptForCausalLM"), + ("mra", "MraForMaskedLM"), + ("mvp", "MvpForConditionalGeneration"), + ("nezha", "NezhaForMaskedLM"), + ("nllb-moe", "NllbMoeForConditionalGeneration"), + ("nystromformer", "NystromformerForMaskedLM"), + ("openai-gpt", "OpenAIGPTLMHeadModel"), + ("pegasus_x", "PegasusXForConditionalGeneration"), + ("plbart", "PLBartForConditionalGeneration"), + ("pop2piano", "Pop2PianoForConditionalGeneration"), + ("qdqbert", "QDQBertForMaskedLM"), + ("reformer", "ReformerModelWithLMHead"), + ("rembert", "RemBertForMaskedLM"), + ("roberta", "RobertaForMaskedLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), + ("roc_bert", "RoCBertForMaskedLM"), + ("roformer", "RoFormerForMaskedLM"), + ("rwkv", "RwkvForCausalLM"), + ("speech_to_text", "Speech2TextForConditionalGeneration"), + ("squeezebert", "SqueezeBertForMaskedLM"), + ("switch_transformers", "SwitchTransformersForConditionalGeneration"), + ("t5", "T5ForConditionalGeneration"), + ("tapas", "TapasForMaskedLM"), + ("transfo-xl", "TransfoXLLMHeadModel"), + ("wav2vec2", "Wav2Vec2ForMaskedLM"), + ("whisper", "WhisperForConditionalGeneration"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-roberta", "XLMRobertaForMaskedLM"), + ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), + ("xlnet", "XLNetLMHeadModel"), + ("xmod", "XmodForMaskedLM"), + ("yoso", "YosoForMaskedLM"), + ] +) + +MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Causal LM mapping + ("bart", "BartForCausalLM"), + ("bert", "BertLMHeadModel"), + ("bert-generation", "BertGenerationDecoder"), + ("big_bird", "BigBirdForCausalLM"), + ("bigbird_pegasus", "BigBirdPegasusForCausalLM"), + ("biogpt", "BioGptForCausalLM"), + ("blenderbot", "BlenderbotForCausalLM"), + ("blenderbot-small", "BlenderbotSmallForCausalLM"), + ("bloom", "BloomForCausalLM"), + ("camembert", "CamembertForCausalLM"), + ("code_llama", "LlamaForCausalLM"), + ("codegen", "CodeGenForCausalLM"), + ("cpmant", "CpmAntForCausalLM"), + ("ctrl", "CTRLLMHeadModel"), + ("data2vec-text", "Data2VecTextForCausalLM"), + ("electra", "ElectraForCausalLM"), + ("ernie", "ErnieForCausalLM"), + ("falcon", "FalconForCausalLM"), + ("fuyu", "FuyuForCausalLM"), + ("git", "GitForCausalLM"), + ("gpt-sw3", "GPT2LMHeadModel"), + ("gpt2", "GPT2LMHeadModel"), + ("gpt_bigcode", 
"GPTBigCodeForCausalLM"), + ("gpt_neo", "GPTNeoForCausalLM"), + ("gpt_neox", "GPTNeoXForCausalLM"), + ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), + ("gptj", "GPTJForCausalLM"), + ("llama", "LlamaForCausalLM"), + ("marian", "MarianForCausalLM"), + ("mbart", "MBartForCausalLM"), + ("mega", "MegaForCausalLM"), + ("megatron-bert", "MegatronBertForCausalLM"), + ("mistral", "MistralForCausalLM"), + ("mixtral", "MixtralForCausalLM"), + ("mpt", "MptForCausalLM"), + ("musicgen", "MusicgenForCausalLM"), + ("mvp", "MvpForCausalLM"), + ("open-llama", "OpenLlamaForCausalLM"), + ("openai-gpt", "OpenAIGPTLMHeadModel"), + ("opt", "OPTForCausalLM"), + ("pegasus", "PegasusForCausalLM"), + ("persimmon", "PersimmonForCausalLM"), + ("phi", "PhiForCausalLM"), + ("plbart", "PLBartForCausalLM"), + ("prophetnet", "ProphetNetForCausalLM"), + ("qdqbert", "QDQBertLMHeadModel"), + ("reformer", "ReformerModelWithLMHead"), + ("rembert", "RemBertForCausalLM"), + ("roberta", "RobertaForCausalLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForCausalLM"), + ("roc_bert", "RoCBertForCausalLM"), + ("roformer", "RoFormerForCausalLM"), + ("rwkv", "RwkvForCausalLM"), + ("speech_to_text_2", "Speech2Text2ForCausalLM"), + ("transfo-xl", "TransfoXLLMHeadModel"), + ("trocr", "TrOCRForCausalLM"), + ("whisper", "WhisperForCausalLM"), + ("xglm", "XGLMForCausalLM"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-prophetnet", "XLMProphetNetForCausalLM"), + ("xlm-roberta", "XLMRobertaForCausalLM"), + ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"), + ("xlnet", "XLNetLMHeadModel"), + ("xmod", "XmodForCausalLM"), + ] +) + +MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( + [ + ("deit", "DeiTForMaskedImageModeling"), + ("focalnet", "FocalNetForMaskedImageModeling"), + ("swin", "SwinForMaskedImageModeling"), + ("swinv2", "Swinv2ForMaskedImageModeling"), + ("vit", "ViTForMaskedImageModeling"), + ] +) + + +MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( + # Model for Causal Image Modeling mapping + [ + ("imagegpt", "ImageGPTForCausalImageModeling"), + ] +) + +MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Image Classification mapping + ("beit", "BeitForImageClassification"), + ("bit", "BitForImageClassification"), + ("convnext", "ConvNextForImageClassification"), + ("convnextv2", "ConvNextV2ForImageClassification"), + ("cvt", "CvtForImageClassification"), + ("data2vec-vision", "Data2VecVisionForImageClassification"), + ( + "deit", + ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher"), + ), + ("dinat", "DinatForImageClassification"), + ("dinov2", "Dinov2ForImageClassification"), + ( + "efficientformer", + ( + "EfficientFormerForImageClassification", + "EfficientFormerForImageClassificationWithTeacher", + ), + ), + ("efficientnet", "EfficientNetForImageClassification"), + ("focalnet", "FocalNetForImageClassification"), + ("imagegpt", "ImageGPTForImageClassification"), + ( + "levit", + ("LevitForImageClassification", "LevitForImageClassificationWithTeacher"), + ), + ("mobilenet_v1", "MobileNetV1ForImageClassification"), + ("mobilenet_v2", "MobileNetV2ForImageClassification"), + ("mobilevit", "MobileViTForImageClassification"), + ("mobilevitv2", "MobileViTV2ForImageClassification"), + ("nat", "NatForImageClassification"), + ( + "perceiver", + ( + "PerceiverForImageClassificationLearned", + "PerceiverForImageClassificationFourier", + "PerceiverForImageClassificationConvProcessing", + ), + ), + ("poolformer", "PoolFormerForImageClassification"), + ("pvt", 
"PvtForImageClassification"), + ("regnet", "RegNetForImageClassification"), + ("resnet", "ResNetForImageClassification"), + ("segformer", "SegformerForImageClassification"), + ("swiftformer", "SwiftFormerForImageClassification"), + ("swin", "SwinForImageClassification"), + ("swinv2", "Swinv2ForImageClassification"), + ("van", "VanForImageClassification"), + ("vit", "ViTForImageClassification"), + ("vit_hybrid", "ViTHybridForImageClassification"), + ("vit_msn", "ViTMSNForImageClassification"), + ] +) + +MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Do not add new models here, this class will be deprecated in the future. + # Model for Image Segmentation mapping + ("detr", "DetrForSegmentation"), + ] +) + +MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Semantic Segmentation mapping + ("beit", "BeitForSemanticSegmentation"), + ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"), + ("dpt", "DPTForSemanticSegmentation"), + ("mobilenet_v2", "MobileNetV2ForSemanticSegmentation"), + ("mobilevit", "MobileViTForSemanticSegmentation"), + ("mobilevitv2", "MobileViTV2ForSemanticSegmentation"), + ("segformer", "SegformerForSemanticSegmentation"), + ("upernet", "UperNetForSemanticSegmentation"), + ] +) + +MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Instance Segmentation mapping + # MaskFormerForInstanceSegmentation can be removed from this mapping in v5 + ("maskformer", "MaskFormerForInstanceSegmentation"), + ] +) + +MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Universal Segmentation mapping + ("detr", "DetrForSegmentation"), + ("mask2former", "Mask2FormerForUniversalSegmentation"), + ("maskformer", "MaskFormerForInstanceSegmentation"), + ("oneformer", "OneFormerForUniversalSegmentation"), + ] +) + +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("timesformer", "TimesformerForVideoClassification"), + ("videomae", "VideoMAEForVideoClassification"), + ("vivit", "VivitForVideoClassification"), + ] +) + +MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("blip", "BlipForConditionalGeneration"), + ("blip-2", "Blip2ForConditionalGeneration"), + ("git", "GitForCausalLM"), + ("instructblip", "InstructBlipForConditionalGeneration"), + ("kosmos-2", "Kosmos2ForConditionalGeneration"), + ("llava", "LlavaForConditionalGeneration"), + ("pix2struct", "Pix2StructForConditionalGeneration"), + ("vipllava", "VipLlavaForConditionalGeneration"), + ("vision-encoder-decoder", "VisionEncoderDecoderModel"), + ] +) + +MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Masked LM mapping + ("albert", "AlbertForMaskedLM"), + ("bart", "BartForConditionalGeneration"), + ("bert", "BertForMaskedLM"), + ("big_bird", "BigBirdForMaskedLM"), + ("camembert", "CamembertForMaskedLM"), + ("convbert", "ConvBertForMaskedLM"), + ("data2vec-text", "Data2VecTextForMaskedLM"), + ("deberta", "DebertaForMaskedLM"), + ("deberta-v2", "DebertaV2ForMaskedLM"), + ("distilbert", "DistilBertForMaskedLM"), + ("electra", "ElectraForMaskedLM"), + ("ernie", "ErnieForMaskedLM"), + ("esm", "EsmForMaskedLM"), + ("flaubert", "FlaubertWithLMHeadModel"), + ("fnet", "FNetForMaskedLM"), + ("funnel", "FunnelForMaskedLM"), + ("ibert", "IBertForMaskedLM"), + ("layoutlm", "LayoutLMForMaskedLM"), + ("longformer", "LongformerForMaskedLM"), + ("luke", "LukeForMaskedLM"), + ("mbart", "MBartForConditionalGeneration"), + ("mega", "MegaForMaskedLM"), + ("megatron-bert", "MegatronBertForMaskedLM"), + 
("mobilebert", "MobileBertForMaskedLM"), + ("mpnet", "MPNetForMaskedLM"), + ("mra", "MraForMaskedLM"), + ("mvp", "MvpForConditionalGeneration"), + ("nezha", "NezhaForMaskedLM"), + ("nystromformer", "NystromformerForMaskedLM"), + ("perceiver", "PerceiverForMaskedLM"), + ("qdqbert", "QDQBertForMaskedLM"), + ("reformer", "ReformerForMaskedLM"), + ("rembert", "RemBertForMaskedLM"), + ("roberta", "RobertaForMaskedLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), + ("roc_bert", "RoCBertForMaskedLM"), + ("roformer", "RoFormerForMaskedLM"), + ("squeezebert", "SqueezeBertForMaskedLM"), + ("tapas", "TapasForMaskedLM"), + ("wav2vec2", "Wav2Vec2ForMaskedLM"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-roberta", "XLMRobertaForMaskedLM"), + ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), + ("xmod", "XmodForMaskedLM"), + ("yoso", "YosoForMaskedLM"), + ] +) + +MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( + [ + # Model for Object Detection mapping + ("conditional_detr", "ConditionalDetrForObjectDetection"), + ("deformable_detr", "DeformableDetrForObjectDetection"), + ("deta", "DetaForObjectDetection"), + ("detr", "DetrForObjectDetection"), + ("table-transformer", "TableTransformerForObjectDetection"), + ("yolos", "YolosForObjectDetection"), + ] +) + +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Object Detection mapping + ("owlv2", "Owlv2ForObjectDetection"), + ("owlvit", "OwlViTForObjectDetection"), + ] +) + +MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict( + [ + # Model for depth estimation mapping + ("dpt", "DPTForDepthEstimation"), + ("glpn", "GLPNForDepthEstimation"), + ] +) +MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Seq2Seq Causal LM mapping + ("bart", "BartForConditionalGeneration"), + ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), + ("blenderbot", "BlenderbotForConditionalGeneration"), + ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), + ("encoder-decoder", "EncoderDecoderModel"), + ("fsmt", "FSMTForConditionalGeneration"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("led", "LEDForConditionalGeneration"), + ("longt5", "LongT5ForConditionalGeneration"), + ("m2m_100", "M2M100ForConditionalGeneration"), + ("marian", "MarianMTModel"), + ("mbart", "MBartForConditionalGeneration"), + ("mt5", "MT5ForConditionalGeneration"), + ("mvp", "MvpForConditionalGeneration"), + ("nllb-moe", "NllbMoeForConditionalGeneration"), + ("pegasus", "PegasusForConditionalGeneration"), + ("pegasus_x", "PegasusXForConditionalGeneration"), + ("plbart", "PLBartForConditionalGeneration"), + ("prophetnet", "ProphetNetForConditionalGeneration"), + ("seamless_m4t", "SeamlessM4TForTextToText"), + ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToText"), + ("switch_transformers", "SwitchTransformersForConditionalGeneration"), + ("t5", "T5ForConditionalGeneration"), + ("umt5", "UMT5ForConditionalGeneration"), + ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"), + ] +) + +MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("pop2piano", "Pop2PianoForConditionalGeneration"), + ("seamless_m4t", "SeamlessM4TForSpeechToText"), + ("seamless_m4t_v2", "SeamlessM4Tv2ForSpeechToText"), + ("speech-encoder-decoder", "SpeechEncoderDecoderModel"), + ("speech_to_text", "Speech2TextForConditionalGeneration"), + ("speecht5", "SpeechT5ForSpeechToText"), + ("whisper", "WhisperForConditionalGeneration"), + ] +) + +MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = 
OrderedDict( + [ + # Model for Sequence Classification mapping + ("albert", "AlbertForSequenceClassification"), + ("bart", "BartForSequenceClassification"), + ("bert", "BertForSequenceClassification"), + ("big_bird", "BigBirdForSequenceClassification"), + ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"), + ("biogpt", "BioGptForSequenceClassification"), + ("bloom", "BloomForSequenceClassification"), + ("camembert", "CamembertForSequenceClassification"), + ("canine", "CanineForSequenceClassification"), + ("code_llama", "LlamaForSequenceClassification"), + ("convbert", "ConvBertForSequenceClassification"), + ("ctrl", "CTRLForSequenceClassification"), + ("data2vec-text", "Data2VecTextForSequenceClassification"), + ("deberta", "DebertaForSequenceClassification"), + ("deberta-v2", "DebertaV2ForSequenceClassification"), + ("distilbert", "DistilBertForSequenceClassification"), + ("electra", "ElectraForSequenceClassification"), + ("ernie", "ErnieForSequenceClassification"), + ("ernie_m", "ErnieMForSequenceClassification"), + ("esm", "EsmForSequenceClassification"), + ("falcon", "FalconForSequenceClassification"), + ("flaubert", "FlaubertForSequenceClassification"), + ("fnet", "FNetForSequenceClassification"), + ("funnel", "FunnelForSequenceClassification"), + ("gpt-sw3", "GPT2ForSequenceClassification"), + ("gpt2", "GPT2ForSequenceClassification"), + ("gpt_bigcode", "GPTBigCodeForSequenceClassification"), + ("gpt_neo", "GPTNeoForSequenceClassification"), + ("gpt_neox", "GPTNeoXForSequenceClassification"), + ("gptj", "GPTJForSequenceClassification"), + ("ibert", "IBertForSequenceClassification"), + ("layoutlm", "LayoutLMForSequenceClassification"), + ("layoutlmv2", "LayoutLMv2ForSequenceClassification"), + ("layoutlmv3", "LayoutLMv3ForSequenceClassification"), + ("led", "LEDForSequenceClassification"), + ("lilt", "LiltForSequenceClassification"), + ("llama", "LlamaForSequenceClassification"), + ("longformer", "LongformerForSequenceClassification"), + ("luke", "LukeForSequenceClassification"), + ("markuplm", "MarkupLMForSequenceClassification"), + ("mbart", "MBartForSequenceClassification"), + ("mega", "MegaForSequenceClassification"), + ("megatron-bert", "MegatronBertForSequenceClassification"), + ("mistral", "MistralForSequenceClassification"), + ("mixtral", "MixtralForSequenceClassification"), + ("mobilebert", "MobileBertForSequenceClassification"), + ("mpnet", "MPNetForSequenceClassification"), + ("mpt", "MptForSequenceClassification"), + ("mra", "MraForSequenceClassification"), + ("mt5", "MT5ForSequenceClassification"), + ("mvp", "MvpForSequenceClassification"), + ("nezha", "NezhaForSequenceClassification"), + ("nystromformer", "NystromformerForSequenceClassification"), + ("open-llama", "OpenLlamaForSequenceClassification"), + ("openai-gpt", "OpenAIGPTForSequenceClassification"), + ("opt", "OPTForSequenceClassification"), + ("perceiver", "PerceiverForSequenceClassification"), + ("persimmon", "PersimmonForSequenceClassification"), + ("phi", "PhiForSequenceClassification"), + ("plbart", "PLBartForSequenceClassification"), + ("qdqbert", "QDQBertForSequenceClassification"), + ("reformer", "ReformerForSequenceClassification"), + ("rembert", "RemBertForSequenceClassification"), + ("roberta", "RobertaForSequenceClassification"), + ("roberta-prelayernorm", "RobertaPreLayerNormForSequenceClassification"), + ("roc_bert", "RoCBertForSequenceClassification"), + ("roformer", "RoFormerForSequenceClassification"), + ("squeezebert", "SqueezeBertForSequenceClassification"), + ("t5", 
"T5ForSequenceClassification"), + ("tapas", "TapasForSequenceClassification"), + ("transfo-xl", "TransfoXLForSequenceClassification"), + ("umt5", "UMT5ForSequenceClassification"), + ("xlm", "XLMForSequenceClassification"), + ("xlm-roberta", "XLMRobertaForSequenceClassification"), + ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"), + ("xlnet", "XLNetForSequenceClassification"), + ("xmod", "XmodForSequenceClassification"), + ("yoso", "YosoForSequenceClassification"), + ] +) + +MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Question Answering mapping + ("albert", "AlbertForQuestionAnswering"), + ("bart", "BartForQuestionAnswering"), + ("bert", "BertForQuestionAnswering"), + ("big_bird", "BigBirdForQuestionAnswering"), + ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"), + ("bloom", "BloomForQuestionAnswering"), + ("camembert", "CamembertForQuestionAnswering"), + ("canine", "CanineForQuestionAnswering"), + ("convbert", "ConvBertForQuestionAnswering"), + ("data2vec-text", "Data2VecTextForQuestionAnswering"), + ("deberta", "DebertaForQuestionAnswering"), + ("deberta-v2", "DebertaV2ForQuestionAnswering"), + ("distilbert", "DistilBertForQuestionAnswering"), + ("electra", "ElectraForQuestionAnswering"), + ("ernie", "ErnieForQuestionAnswering"), + ("ernie_m", "ErnieMForQuestionAnswering"), + ("falcon", "FalconForQuestionAnswering"), + ("flaubert", "FlaubertForQuestionAnsweringSimple"), + ("fnet", "FNetForQuestionAnswering"), + ("funnel", "FunnelForQuestionAnswering"), + ("gpt2", "GPT2ForQuestionAnswering"), + ("gpt_neo", "GPTNeoForQuestionAnswering"), + ("gpt_neox", "GPTNeoXForQuestionAnswering"), + ("gptj", "GPTJForQuestionAnswering"), + ("ibert", "IBertForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ("led", "LEDForQuestionAnswering"), + ("lilt", "LiltForQuestionAnswering"), + ("longformer", "LongformerForQuestionAnswering"), + ("luke", "LukeForQuestionAnswering"), + ("lxmert", "LxmertForQuestionAnswering"), + ("markuplm", "MarkupLMForQuestionAnswering"), + ("mbart", "MBartForQuestionAnswering"), + ("mega", "MegaForQuestionAnswering"), + ("megatron-bert", "MegatronBertForQuestionAnswering"), + ("mobilebert", "MobileBertForQuestionAnswering"), + ("mpnet", "MPNetForQuestionAnswering"), + ("mpt", "MptForQuestionAnswering"), + ("mra", "MraForQuestionAnswering"), + ("mt5", "MT5ForQuestionAnswering"), + ("mvp", "MvpForQuestionAnswering"), + ("nezha", "NezhaForQuestionAnswering"), + ("nystromformer", "NystromformerForQuestionAnswering"), + ("opt", "OPTForQuestionAnswering"), + ("qdqbert", "QDQBertForQuestionAnswering"), + ("reformer", "ReformerForQuestionAnswering"), + ("rembert", "RemBertForQuestionAnswering"), + ("roberta", "RobertaForQuestionAnswering"), + ("roberta-prelayernorm", "RobertaPreLayerNormForQuestionAnswering"), + ("roc_bert", "RoCBertForQuestionAnswering"), + ("roformer", "RoFormerForQuestionAnswering"), + ("splinter", "SplinterForQuestionAnswering"), + ("squeezebert", "SqueezeBertForQuestionAnswering"), + ("t5", "T5ForQuestionAnswering"), + ("umt5", "UMT5ForQuestionAnswering"), + ("xlm", "XLMForQuestionAnsweringSimple"), + ("xlm-roberta", "XLMRobertaForQuestionAnswering"), + ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"), + ("xlnet", "XLNetForQuestionAnsweringSimple"), + ("xmod", "XmodForQuestionAnswering"), + ("yoso", "YosoForQuestionAnswering"), + ] +) + +MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Table Question 
Answering mapping + ("tapas", "TapasForQuestionAnswering"), + ] +) + +MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("blip-2", "Blip2ForConditionalGeneration"), + ("vilt", "ViltForQuestionAnswering"), + ] +) + +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "LayoutLMForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ] +) + +MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Token Classification mapping + ("albert", "AlbertForTokenClassification"), + ("bert", "BertForTokenClassification"), + ("big_bird", "BigBirdForTokenClassification"), + ("biogpt", "BioGptForTokenClassification"), + ("bloom", "BloomForTokenClassification"), + ("bros", "BrosForTokenClassification"), + ("camembert", "CamembertForTokenClassification"), + ("canine", "CanineForTokenClassification"), + ("convbert", "ConvBertForTokenClassification"), + ("data2vec-text", "Data2VecTextForTokenClassification"), + ("deberta", "DebertaForTokenClassification"), + ("deberta-v2", "DebertaV2ForTokenClassification"), + ("distilbert", "DistilBertForTokenClassification"), + ("electra", "ElectraForTokenClassification"), + ("ernie", "ErnieForTokenClassification"), + ("ernie_m", "ErnieMForTokenClassification"), + ("esm", "EsmForTokenClassification"), + ("falcon", "FalconForTokenClassification"), + ("flaubert", "FlaubertForTokenClassification"), + ("fnet", "FNetForTokenClassification"), + ("funnel", "FunnelForTokenClassification"), + ("gpt-sw3", "GPT2ForTokenClassification"), + ("gpt2", "GPT2ForTokenClassification"), + ("gpt_bigcode", "GPTBigCodeForTokenClassification"), + ("gpt_neo", "GPTNeoForTokenClassification"), + ("gpt_neox", "GPTNeoXForTokenClassification"), + ("ibert", "IBertForTokenClassification"), + ("layoutlm", "LayoutLMForTokenClassification"), + ("layoutlmv2", "LayoutLMv2ForTokenClassification"), + ("layoutlmv3", "LayoutLMv3ForTokenClassification"), + ("lilt", "LiltForTokenClassification"), + ("longformer", "LongformerForTokenClassification"), + ("luke", "LukeForTokenClassification"), + ("markuplm", "MarkupLMForTokenClassification"), + ("mega", "MegaForTokenClassification"), + ("megatron-bert", "MegatronBertForTokenClassification"), + ("mobilebert", "MobileBertForTokenClassification"), + ("mpnet", "MPNetForTokenClassification"), + ("mpt", "MptForTokenClassification"), + ("mra", "MraForTokenClassification"), + ("nezha", "NezhaForTokenClassification"), + ("nystromformer", "NystromformerForTokenClassification"), + ("phi", "PhiForTokenClassification"), + ("qdqbert", "QDQBertForTokenClassification"), + ("rembert", "RemBertForTokenClassification"), + ("roberta", "RobertaForTokenClassification"), + ("roberta-prelayernorm", "RobertaPreLayerNormForTokenClassification"), + ("roc_bert", "RoCBertForTokenClassification"), + ("roformer", "RoFormerForTokenClassification"), + ("squeezebert", "SqueezeBertForTokenClassification"), + ("xlm", "XLMForTokenClassification"), + ("xlm-roberta", "XLMRobertaForTokenClassification"), + ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"), + ("xlnet", "XLNetForTokenClassification"), + ("xmod", "XmodForTokenClassification"), + ("yoso", "YosoForTokenClassification"), + ] +) + +MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( + [ + # Model for Multiple Choice mapping + ("albert", "AlbertForMultipleChoice"), + ("bert", "BertForMultipleChoice"), + ("big_bird", "BigBirdForMultipleChoice"), + ("camembert", "CamembertForMultipleChoice"), + 
("canine", "CanineForMultipleChoice"), + ("convbert", "ConvBertForMultipleChoice"), + ("data2vec-text", "Data2VecTextForMultipleChoice"), + ("deberta-v2", "DebertaV2ForMultipleChoice"), + ("distilbert", "DistilBertForMultipleChoice"), + ("electra", "ElectraForMultipleChoice"), + ("ernie", "ErnieForMultipleChoice"), + ("ernie_m", "ErnieMForMultipleChoice"), + ("flaubert", "FlaubertForMultipleChoice"), + ("fnet", "FNetForMultipleChoice"), + ("funnel", "FunnelForMultipleChoice"), + ("ibert", "IBertForMultipleChoice"), + ("longformer", "LongformerForMultipleChoice"), + ("luke", "LukeForMultipleChoice"), + ("mega", "MegaForMultipleChoice"), + ("megatron-bert", "MegatronBertForMultipleChoice"), + ("mobilebert", "MobileBertForMultipleChoice"), + ("mpnet", "MPNetForMultipleChoice"), + ("mra", "MraForMultipleChoice"), + ("nezha", "NezhaForMultipleChoice"), + ("nystromformer", "NystromformerForMultipleChoice"), + ("qdqbert", "QDQBertForMultipleChoice"), + ("rembert", "RemBertForMultipleChoice"), + ("roberta", "RobertaForMultipleChoice"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMultipleChoice"), + ("roc_bert", "RoCBertForMultipleChoice"), + ("roformer", "RoFormerForMultipleChoice"), + ("squeezebert", "SqueezeBertForMultipleChoice"), + ("xlm", "XLMForMultipleChoice"), + ("xlm-roberta", "XLMRobertaForMultipleChoice"), + ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"), + ("xlnet", "XLNetForMultipleChoice"), + ("xmod", "XmodForMultipleChoice"), + ("yoso", "YosoForMultipleChoice"), + ] +) + +MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( + [ + ("bert", "BertForNextSentencePrediction"), + ("ernie", "ErnieForNextSentencePrediction"), + ("fnet", "FNetForNextSentencePrediction"), + ("megatron-bert", "MegatronBertForNextSentencePrediction"), + ("mobilebert", "MobileBertForNextSentencePrediction"), + ("nezha", "NezhaForNextSentencePrediction"), + ("qdqbert", "QDQBertForNextSentencePrediction"), + ] +) + +MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Audio Classification mapping + ("audio-spectrogram-transformer", "ASTForAudioClassification"), + ("data2vec-audio", "Data2VecAudioForSequenceClassification"), + ("hubert", "HubertForSequenceClassification"), + ("sew", "SEWForSequenceClassification"), + ("sew-d", "SEWDForSequenceClassification"), + ("unispeech", "UniSpeechForSequenceClassification"), + ("unispeech-sat", "UniSpeechSatForSequenceClassification"), + ("wav2vec2", "Wav2Vec2ForSequenceClassification"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"), + ("wavlm", "WavLMForSequenceClassification"), + ("whisper", "WhisperForAudioClassification"), + ] +) + +MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict( + [ + # Model for Connectionist temporal classification (CTC) mapping + ("data2vec-audio", "Data2VecAudioForCTC"), + ("hubert", "HubertForCTC"), + ("mctct", "MCTCTForCTC"), + ("sew", "SEWForCTC"), + ("sew-d", "SEWDForCTC"), + ("unispeech", "UniSpeechForCTC"), + ("unispeech-sat", "UniSpeechSatForCTC"), + ("wav2vec2", "Wav2Vec2ForCTC"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"), + ("wavlm", "WavLMForCTC"), + ] +) + +MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Audio Classification mapping + ("data2vec-audio", "Data2VecAudioForAudioFrameClassification"), + ("unispeech-sat", "UniSpeechSatForAudioFrameClassification"), + ("wav2vec2", "Wav2Vec2ForAudioFrameClassification"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForAudioFrameClassification"), + ("wavlm", 
"WavLMForAudioFrameClassification"), + ] +) + +MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict( + [ + # Model for Audio Classification mapping + ("data2vec-audio", "Data2VecAudioForXVector"), + ("unispeech-sat", "UniSpeechSatForXVector"), + ("wav2vec2", "Wav2Vec2ForXVector"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForXVector"), + ("wavlm", "WavLMForXVector"), + ] +) + +MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = OrderedDict( + [ + # Model for Text-To-Spectrogram mapping + ("speecht5", "SpeechT5ForTextToSpeech"), + ] +) + +MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = OrderedDict( + [ + # Model for Text-To-Waveform mapping + ("bark", "BarkModel"), + ("musicgen", "MusicgenForConditionalGeneration"), + ("seamless_m4t", "SeamlessM4TForTextToSpeech"), + ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToSpeech"), + ("vits", "VitsModel"), + ] +) + +MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Image Classification mapping + ("align", "AlignModel"), + ("altclip", "AltCLIPModel"), + ("blip", "BlipModel"), + ("chinese_clip", "ChineseCLIPModel"), + ("clip", "CLIPModel"), + ("clipseg", "CLIPSegModel"), + ] +) + +MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict( + [ + # Backbone mapping + ("beit", "BeitBackbone"), + ("bit", "BitBackbone"), + ("convnext", "ConvNextBackbone"), + ("convnextv2", "ConvNextV2Backbone"), + ("dinat", "DinatBackbone"), + ("dinov2", "Dinov2Backbone"), + ("focalnet", "FocalNetBackbone"), + ("maskformer-swin", "MaskFormerSwinBackbone"), + ("nat", "NatBackbone"), + ("resnet", "ResNetBackbone"), + ("swin", "SwinBackbone"), + ("swinv2", "Swinv2Backbone"), + ("timm_backbone", "TimmBackbone"), + ("vitdet", "VitDetBackbone"), + ] +) + +MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict( + [ + ("sam", "SamModel"), + ] +) + +MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict( + [ + ("albert", "AlbertModel"), + ("bert", "BertModel"), + ("big_bird", "BigBirdModel"), + ("data2vec-text", "Data2VecTextModel"), + ("deberta", "DebertaModel"), + ("deberta-v2", "DebertaV2Model"), + ("distilbert", "DistilBertModel"), + ("electra", "ElectraModel"), + ("flaubert", "FlaubertModel"), + ("ibert", "IBertModel"), + ("longformer", "LongformerModel"), + ("mobilebert", "MobileBertModel"), + ("mt5", "MT5EncoderModel"), + ("nystromformer", "NystromformerModel"), + ("reformer", "ReformerModel"), + ("rembert", "RemBertModel"), + ("roberta", "RobertaModel"), + ("roberta-prelayernorm", "RobertaPreLayerNormModel"), + ("roc_bert", "RoCBertModel"), + ("roformer", "RoFormerModel"), + ("squeezebert", "SqueezeBertModel"), + ("t5", "T5EncoderModel"), + ("umt5", "UMT5EncoderModel"), + ("xlm", "XLMModel"), + ("xlm-roberta", "XLMRobertaModel"), + ("xlm-roberta-xl", "XLMRobertaXLModel"), + ] +) + +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("patchtsmixer", "PatchTSMixerForTimeSeriesClassification"), + ("patchtst", "PatchTSTForClassification"), + ] +) + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict( + [ + ("patchtsmixer", "PatchTSMixerForRegression"), + ("patchtst", "PatchTSTForRegression"), + ] +) + +MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict( + [ + ("swin2sr", "Swin2SRForImageSuperResolution"), + ] +) + +MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES) +MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES) +MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES) +MODEL_FOR_CAUSAL_LM_MAPPING = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) +MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES +) +MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) +MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES) +MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES +) +MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES) +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES +) +MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES) +MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES +) +MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES) +MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES +) +MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES) +MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES) +MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, 
MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES) + +MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES +) + +MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES) + +MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES) + +MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASK_GENERATION_MAPPING_NAMES) + +MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) + +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES +) + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES +) + +MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) + + +class AutoModelForMaskGeneration(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING + + +class AutoModelForTextEncoding(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING + + +class AutoModelForImageToImage(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING + + +class AutoModel(_BaseAutoModelClass): + _model_mapping = MODEL_MAPPING + + +AutoModel = auto_class_update(AutoModel) + + +class AutoModelForPreTraining(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_PRETRAINING_MAPPING + + +AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining") + + +# Private on purpose, the public class will add the deprecation warnings. 
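+# Concretely, the deprecated `AutoModelWithLMHead` defined at the bottom of this file
+# subclasses `_AutoModelWithLMHead` and raises a `FutureWarning` pointing users to
+# `AutoModelForCausalLM`, `AutoModelForMaskedLM` or `AutoModelForSeq2SeqLM` instead,
+# e.g. (illustrative): `model = AutoModelForCausalLM.from_pretrained("gpt2")`.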
+class _AutoModelWithLMHead(_BaseAutoModelClass): + _model_mapping = MODEL_WITH_LM_HEAD_MAPPING + + +_AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling") + + +class AutoModelForCausalLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING + + +AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling") + + +class AutoModelForMaskedLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MASKED_LM_MAPPING + + +AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling") + + +class AutoModelForSeq2SeqLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING + + +AutoModelForSeq2SeqLM = auto_class_update( + AutoModelForSeq2SeqLM, + head_doc="sequence-to-sequence language modeling", + checkpoint_for_example="t5-base", +) + + +class AutoModelForSequenceClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING + + +AutoModelForSequenceClassification = auto_class_update( + AutoModelForSequenceClassification, head_doc="sequence classification" +) + + +class AutoModelForQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING + + +AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering") + + +class AutoModelForTableQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + + +AutoModelForTableQuestionAnswering = auto_class_update( + AutoModelForTableQuestionAnswering, + head_doc="table question answering", + checkpoint_for_example="google/tapas-base-finetuned-wtq", +) + + +class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING + + +AutoModelForVisualQuestionAnswering = auto_class_update( + AutoModelForVisualQuestionAnswering, + head_doc="visual question answering", + checkpoint_for_example="dandelin/vilt-b32-finetuned-vqa", +) + + +class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +AutoModelForDocumentQuestionAnswering = auto_class_update( + AutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', +) + + +class AutoModelForTokenClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING + + +AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification") + + +class AutoModelForMultipleChoice(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING + + +AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice") + + +class AutoModelForNextSentencePrediction(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING + + +AutoModelForNextSentencePrediction = auto_class_update( + AutoModelForNextSentencePrediction, head_doc="next sentence prediction" +) + + +class AutoModelForImageClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING + + +AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc="image classification") + + +class AutoModelForZeroShotImageClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING + + 
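+# As everywhere else in this file, `auto_class_update` below fills in the task-specific
+# docstrings on the class that was just declared, using `head_doc` (and substituting
+# `checkpoint_for_example` into the usage examples where one is given).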
+AutoModelForZeroShotImageClassification = auto_class_update( + AutoModelForZeroShotImageClassification, head_doc="zero-shot image classification" +) + + +class AutoModelForImageSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING + + +AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc="image segmentation") + + +class AutoModelForSemanticSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING + + +AutoModelForSemanticSegmentation = auto_class_update( + AutoModelForSemanticSegmentation, head_doc="semantic segmentation" +) + + +class AutoModelForUniversalSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING + + +AutoModelForUniversalSegmentation = auto_class_update( + AutoModelForUniversalSegmentation, head_doc="universal image segmentation" +) + + +class AutoModelForInstanceSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING + + +AutoModelForInstanceSegmentation = auto_class_update( + AutoModelForInstanceSegmentation, head_doc="instance segmentation" +) + + +class AutoModelForObjectDetection(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING + + +AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection") + + +class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING + + +AutoModelForZeroShotObjectDetection = auto_class_update( + AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection" +) + + +class AutoModelForDepthEstimation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING + + +AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation") + + +class AutoModelForVideoClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING + + +AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification") + + +class AutoModelForVision2Seq(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING + + +AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling") + + +class AutoModelForAudioClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING + + +AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc="audio classification") + + +class AutoModelForCTC(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_CTC_MAPPING + + +AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc="connectionist temporal classification") + + +class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING + + +AutoModelForSpeechSeq2Seq = auto_class_update( + AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" +) + + +class AutoModelForAudioFrameClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING + + +AutoModelForAudioFrameClassification = auto_class_update( + AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification" +) + + +class AutoModelForAudioXVector(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING + + +class AutoModelForTextToSpectrogram(_BaseAutoModelClass): + _model_mapping = 
MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING + + +class AutoModelForTextToWaveform(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING + + +class AutoBackbone(_BaseAutoBackboneClass): + _model_mapping = MODEL_FOR_BACKBONE_MAPPING + + +AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc="audio retrieval via x-vector") + + +class AutoModelForMaskedImageModeling(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING + + +AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling") + + +class AutoModelWithLMHead(_AutoModelWithLMHead): + @classmethod + def from_config(cls, config): + warnings.warn( + "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " + "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " + "`AutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_config(config) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + warnings.warn( + "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " + "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " + "`AutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/modified/models/auto/modeling_flax_auto.py b/modified/models/auto/modeling_flax_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7d87e4e2dbd49946859c826f1ef5c8f6e76a5c --- /dev/null +++ b/modified/models/auto/modeling_flax_auto.py @@ -0,0 +1,376 @@ +# coding=utf-8 +# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Auto Model class.""" + + +from collections import OrderedDict + +from ...utils import logging +from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update +from .configuration_auto import CONFIG_MAPPING_NAMES + + +logger = logging.get_logger(__name__) + + +FLAX_MODEL_MAPPING_NAMES = OrderedDict( + [ + # Base model mapping + ("albert", "FlaxAlbertModel"), + ("bart", "FlaxBartModel"), + ("beit", "FlaxBeitModel"), + ("bert", "FlaxBertModel"), + ("big_bird", "FlaxBigBirdModel"), + ("blenderbot", "FlaxBlenderbotModel"), + ("blenderbot-small", "FlaxBlenderbotSmallModel"), + ("bloom", "FlaxBloomModel"), + ("clip", "FlaxCLIPModel"), + ("distilbert", "FlaxDistilBertModel"), + ("electra", "FlaxElectraModel"), + ("gpt-sw3", "FlaxGPT2Model"), + ("gpt2", "FlaxGPT2Model"), + ("gpt_neo", "FlaxGPTNeoModel"), + ("gptj", "FlaxGPTJModel"), + ("llama", "FlaxLlamaModel"), + ("longt5", "FlaxLongT5Model"), + ("marian", "FlaxMarianModel"), + ("mbart", "FlaxMBartModel"), + ("mt5", "FlaxMT5Model"), + ("opt", "FlaxOPTModel"), + ("pegasus", "FlaxPegasusModel"), + ("regnet", "FlaxRegNetModel"), + ("resnet", "FlaxResNetModel"), + ("roberta", "FlaxRobertaModel"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), + ("roformer", "FlaxRoFormerModel"), + ("t5", "FlaxT5Model"), + ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), + ("vit", "FlaxViTModel"), + ("wav2vec2", "FlaxWav2Vec2Model"), + ("whisper", "FlaxWhisperModel"), + ("xglm", "FlaxXGLMModel"), + ("xlm-roberta", "FlaxXLMRobertaModel"), + ] +) + +FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( + [ + # Model for pre-training mapping + ("albert", "FlaxAlbertForPreTraining"), + ("bart", "FlaxBartForConditionalGeneration"), + ("bert", "FlaxBertForPreTraining"), + ("big_bird", "FlaxBigBirdForPreTraining"), + ("electra", "FlaxElectraForPreTraining"), + ("longt5", "FlaxLongT5ForConditionalGeneration"), + ("mbart", "FlaxMBartForConditionalGeneration"), + ("mt5", "FlaxMT5ForConditionalGeneration"), + ("roberta", "FlaxRobertaForMaskedLM"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), + ("roformer", "FlaxRoFormerForMaskedLM"), + ("t5", "FlaxT5ForConditionalGeneration"), + ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), + ("whisper", "FlaxWhisperForConditionalGeneration"), + ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), + ] +) + +FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Masked LM mapping + ("albert", "FlaxAlbertForMaskedLM"), + ("bart", "FlaxBartForConditionalGeneration"), + ("bert", "FlaxBertForMaskedLM"), + ("big_bird", "FlaxBigBirdForMaskedLM"), + ("distilbert", "FlaxDistilBertForMaskedLM"), + ("electra", "FlaxElectraForMaskedLM"), + ("mbart", "FlaxMBartForConditionalGeneration"), + ("roberta", "FlaxRobertaForMaskedLM"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), + ("roformer", "FlaxRoFormerForMaskedLM"), + ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), + ] +) + +FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Seq2Seq Causal LM mapping + ("bart", "FlaxBartForConditionalGeneration"), + ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), + ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), + ("encoder-decoder", "FlaxEncoderDecoderModel"), + ("longt5", "FlaxLongT5ForConditionalGeneration"), + ("marian", "FlaxMarianMTModel"), + ("mbart", "FlaxMBartForConditionalGeneration"), + ("mt5", "FlaxMT5ForConditionalGeneration"), + ("pegasus", "FlaxPegasusForConditionalGeneration"), + ("t5", 
"FlaxT5ForConditionalGeneration"), + ] +) + +FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Image-classsification + ("beit", "FlaxBeitForImageClassification"), + ("regnet", "FlaxRegNetForImageClassification"), + ("resnet", "FlaxResNetForImageClassification"), + ("vit", "FlaxViTForImageClassification"), + ] +) + +FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), + ] +) + +FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Causal LM mapping + ("bart", "FlaxBartForCausalLM"), + ("bert", "FlaxBertForCausalLM"), + ("big_bird", "FlaxBigBirdForCausalLM"), + ("bloom", "FlaxBloomForCausalLM"), + ("electra", "FlaxElectraForCausalLM"), + ("gpt-sw3", "FlaxGPT2LMHeadModel"), + ("gpt2", "FlaxGPT2LMHeadModel"), + ("gpt_neo", "FlaxGPTNeoForCausalLM"), + ("gptj", "FlaxGPTJForCausalLM"), + ("llama", "FlaxLlamaForCausalLM"), + ("opt", "FlaxOPTForCausalLM"), + ("roberta", "FlaxRobertaForCausalLM"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), + ("xglm", "FlaxXGLMForCausalLM"), + ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), + ] +) + +FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Sequence Classification mapping + ("albert", "FlaxAlbertForSequenceClassification"), + ("bart", "FlaxBartForSequenceClassification"), + ("bert", "FlaxBertForSequenceClassification"), + ("big_bird", "FlaxBigBirdForSequenceClassification"), + ("distilbert", "FlaxDistilBertForSequenceClassification"), + ("electra", "FlaxElectraForSequenceClassification"), + ("mbart", "FlaxMBartForSequenceClassification"), + ("roberta", "FlaxRobertaForSequenceClassification"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), + ("roformer", "FlaxRoFormerForSequenceClassification"), + ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), + ] +) + +FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Question Answering mapping + ("albert", "FlaxAlbertForQuestionAnswering"), + ("bart", "FlaxBartForQuestionAnswering"), + ("bert", "FlaxBertForQuestionAnswering"), + ("big_bird", "FlaxBigBirdForQuestionAnswering"), + ("distilbert", "FlaxDistilBertForQuestionAnswering"), + ("electra", "FlaxElectraForQuestionAnswering"), + ("mbart", "FlaxMBartForQuestionAnswering"), + ("roberta", "FlaxRobertaForQuestionAnswering"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), + ("roformer", "FlaxRoFormerForQuestionAnswering"), + ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), + ] +) + +FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Token Classification mapping + ("albert", "FlaxAlbertForTokenClassification"), + ("bert", "FlaxBertForTokenClassification"), + ("big_bird", "FlaxBigBirdForTokenClassification"), + ("distilbert", "FlaxDistilBertForTokenClassification"), + ("electra", "FlaxElectraForTokenClassification"), + ("roberta", "FlaxRobertaForTokenClassification"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), + ("roformer", "FlaxRoFormerForTokenClassification"), + ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), + ] +) + +FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( + [ + # Model for Multiple Choice mapping + ("albert", "FlaxAlbertForMultipleChoice"), + ("bert", "FlaxBertForMultipleChoice"), + ("big_bird", "FlaxBigBirdForMultipleChoice"), + ("distilbert", "FlaxDistilBertForMultipleChoice"), + ("electra", 
"FlaxElectraForMultipleChoice"), + ("roberta", "FlaxRobertaForMultipleChoice"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), + ("roformer", "FlaxRoFormerForMultipleChoice"), + ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), + ] +) + +FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( + [ + ("bert", "FlaxBertForNextSentencePrediction"), + ] +) + +FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), + ("whisper", "FlaxWhisperForConditionalGeneration"), + ] +) + +FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("whisper", "FlaxWhisperForAudioClassification"), + ] +) + +FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) +FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) +FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) +FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES +) +FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) +FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) +FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES +) +FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES +) +FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +) +FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES +) +FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES +) +FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES +) +FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES +) + + +class FlaxAutoModel(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_MAPPING + + +FlaxAutoModel = auto_class_update(FlaxAutoModel) + + +class FlaxAutoModelForPreTraining(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING + + +FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") + + +class FlaxAutoModelForCausalLM(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING + + +FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") + + +class FlaxAutoModelForMaskedLM(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING + + +FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") + + +class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING + + +FlaxAutoModelForSeq2SeqLM = auto_class_update( + FlaxAutoModelForSeq2SeqLM, 
head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" +) + + +class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING + + +FlaxAutoModelForSequenceClassification = auto_class_update( + FlaxAutoModelForSequenceClassification, head_doc="sequence classification" +) + + +class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING + + +FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") + + +class FlaxAutoModelForTokenClassification(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING + + +FlaxAutoModelForTokenClassification = auto_class_update( + FlaxAutoModelForTokenClassification, head_doc="token classification" +) + + +class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING + + +FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") + + +class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING + + +FlaxAutoModelForNextSentencePrediction = auto_class_update( + FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" +) + + +class FlaxAutoModelForImageClassification(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING + + +FlaxAutoModelForImageClassification = auto_class_update( + FlaxAutoModelForImageClassification, head_doc="image classification" +) + + +class FlaxAutoModelForVision2Seq(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING + + +FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling") + + +class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING + + +FlaxAutoModelForSpeechSeq2Seq = auto_class_update( + FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" +) diff --git a/modified/models/auto/modeling_tf_auto.py b/modified/models/auto/modeling_tf_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..e79922f928226d204590cdba5e50e60596dda041 --- /dev/null +++ b/modified/models/auto/modeling_tf_auto.py @@ -0,0 +1,719 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Auto Model class.""" + + +import warnings +from collections import OrderedDict + +from ...utils import logging +from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update +from .configuration_auto import CONFIG_MAPPING_NAMES + + +logger = logging.get_logger(__name__) + + +TF_MODEL_MAPPING_NAMES = OrderedDict( + [ + # Base model mapping + ("albert", "TFAlbertModel"), + ("bart", "TFBartModel"), + ("bert", "TFBertModel"), + ("blenderbot", "TFBlenderbotModel"), + ("blenderbot-small", "TFBlenderbotSmallModel"), + ("blip", "TFBlipModel"), + ("camembert", "TFCamembertModel"), + ("clip", "TFCLIPModel"), + ("convbert", "TFConvBertModel"), + ("convnext", "TFConvNextModel"), + ("convnextv2", "TFConvNextV2Model"), + ("ctrl", "TFCTRLModel"), + ("cvt", "TFCvtModel"), + ("data2vec-vision", "TFData2VecVisionModel"), + ("deberta", "TFDebertaModel"), + ("deberta-v2", "TFDebertaV2Model"), + ("deit", "TFDeiTModel"), + ("distilbert", "TFDistilBertModel"), + ("dpr", "TFDPRQuestionEncoder"), + ("efficientformer", "TFEfficientFormerModel"), + ("electra", "TFElectraModel"), + ("esm", "TFEsmModel"), + ("flaubert", "TFFlaubertModel"), + ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")), + ("gpt-sw3", "TFGPT2Model"), + ("gpt2", "TFGPT2Model"), + ("gptj", "TFGPTJModel"), + ("groupvit", "TFGroupViTModel"), + ("hubert", "TFHubertModel"), + ("layoutlm", "TFLayoutLMModel"), + ("layoutlmv3", "TFLayoutLMv3Model"), + ("led", "TFLEDModel"), + ("longformer", "TFLongformerModel"), + ("lxmert", "TFLxmertModel"), + ("marian", "TFMarianModel"), + ("mbart", "TFMBartModel"), + ("mobilebert", "TFMobileBertModel"), + ("mobilevit", "TFMobileViTModel"), + ("mpnet", "TFMPNetModel"), + ("mt5", "TFMT5Model"), + ("openai-gpt", "TFOpenAIGPTModel"), + ("opt", "TFOPTModel"), + ("pegasus", "TFPegasusModel"), + ("regnet", "TFRegNetModel"), + ("rembert", "TFRemBertModel"), + ("resnet", "TFResNetModel"), + ("roberta", "TFRobertaModel"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"), + ("roformer", "TFRoFormerModel"), + ("sam", "TFSamModel"), + ("segformer", "TFSegformerModel"), + ("speech_to_text", "TFSpeech2TextModel"), + ("swin", "TFSwinModel"), + ("t5", "TFT5Model"), + ("tapas", "TFTapasModel"), + ("transfo-xl", "TFTransfoXLModel"), + ("vision-text-dual-encoder", "TFVisionTextDualEncoderModel"), + ("vit", "TFViTModel"), + ("vit_mae", "TFViTMAEModel"), + ("wav2vec2", "TFWav2Vec2Model"), + ("whisper", "TFWhisperModel"), + ("xglm", "TFXGLMModel"), + ("xlm", "TFXLMModel"), + ("xlm-roberta", "TFXLMRobertaModel"), + ("xlnet", "TFXLNetModel"), + ] +) + +TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( + [ + # Model for pre-training mapping + ("albert", "TFAlbertForPreTraining"), + ("bart", "TFBartForConditionalGeneration"), + ("bert", "TFBertForPreTraining"), + ("camembert", "TFCamembertForMaskedLM"), + ("ctrl", "TFCTRLLMHeadModel"), + ("distilbert", "TFDistilBertForMaskedLM"), + ("electra", "TFElectraForPreTraining"), + ("flaubert", "TFFlaubertWithLMHeadModel"), + ("funnel", "TFFunnelForPreTraining"), + ("gpt-sw3", "TFGPT2LMHeadModel"), + ("gpt2", "TFGPT2LMHeadModel"), + ("layoutlm", "TFLayoutLMForMaskedLM"), + ("lxmert", "TFLxmertForPreTraining"), + ("mobilebert", "TFMobileBertForPreTraining"), + ("mpnet", "TFMPNetForMaskedLM"), + ("openai-gpt", "TFOpenAIGPTLMHeadModel"), + ("roberta", "TFRobertaForMaskedLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), + ("t5", "TFT5ForConditionalGeneration"), + ("tapas", "TFTapasForMaskedLM"), + ("transfo-xl", "TFTransfoXLLMHeadModel"), + 
("vit_mae", "TFViTMAEForPreTraining"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForMaskedLM"), + ("xlnet", "TFXLNetLMHeadModel"), + ] +) + +TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict( + [ + # Model with LM heads mapping + ("albert", "TFAlbertForMaskedLM"), + ("bart", "TFBartForConditionalGeneration"), + ("bert", "TFBertForMaskedLM"), + ("camembert", "TFCamembertForMaskedLM"), + ("convbert", "TFConvBertForMaskedLM"), + ("ctrl", "TFCTRLLMHeadModel"), + ("distilbert", "TFDistilBertForMaskedLM"), + ("electra", "TFElectraForMaskedLM"), + ("esm", "TFEsmForMaskedLM"), + ("flaubert", "TFFlaubertWithLMHeadModel"), + ("funnel", "TFFunnelForMaskedLM"), + ("gpt-sw3", "TFGPT2LMHeadModel"), + ("gpt2", "TFGPT2LMHeadModel"), + ("gptj", "TFGPTJForCausalLM"), + ("layoutlm", "TFLayoutLMForMaskedLM"), + ("led", "TFLEDForConditionalGeneration"), + ("longformer", "TFLongformerForMaskedLM"), + ("marian", "TFMarianMTModel"), + ("mobilebert", "TFMobileBertForMaskedLM"), + ("mpnet", "TFMPNetForMaskedLM"), + ("openai-gpt", "TFOpenAIGPTLMHeadModel"), + ("rembert", "TFRemBertForMaskedLM"), + ("roberta", "TFRobertaForMaskedLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), + ("roformer", "TFRoFormerForMaskedLM"), + ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), + ("t5", "TFT5ForConditionalGeneration"), + ("tapas", "TFTapasForMaskedLM"), + ("transfo-xl", "TFTransfoXLLMHeadModel"), + ("whisper", "TFWhisperForConditionalGeneration"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForMaskedLM"), + ("xlnet", "TFXLNetLMHeadModel"), + ] +) + +TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Causal LM mapping + ("bert", "TFBertLMHeadModel"), + ("camembert", "TFCamembertForCausalLM"), + ("ctrl", "TFCTRLLMHeadModel"), + ("gpt-sw3", "TFGPT2LMHeadModel"), + ("gpt2", "TFGPT2LMHeadModel"), + ("gptj", "TFGPTJForCausalLM"), + ("openai-gpt", "TFOpenAIGPTLMHeadModel"), + ("opt", "TFOPTForCausalLM"), + ("rembert", "TFRemBertForCausalLM"), + ("roberta", "TFRobertaForCausalLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForCausalLM"), + ("roformer", "TFRoFormerForCausalLM"), + ("transfo-xl", "TFTransfoXLLMHeadModel"), + ("xglm", "TFXGLMForCausalLM"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForCausalLM"), + ("xlnet", "TFXLNetLMHeadModel"), + ] +) + +TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( + [ + ("deit", "TFDeiTForMaskedImageModeling"), + ("swin", "TFSwinForMaskedImageModeling"), + ] +) + +TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Image-classsification + ("convnext", "TFConvNextForImageClassification"), + ("convnextv2", "TFConvNextV2ForImageClassification"), + ("cvt", "TFCvtForImageClassification"), + ("data2vec-vision", "TFData2VecVisionForImageClassification"), + ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")), + ( + "efficientformer", + ("TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher"), + ), + ("mobilevit", "TFMobileViTForImageClassification"), + ("regnet", "TFRegNetForImageClassification"), + ("resnet", "TFResNetForImageClassification"), + ("segformer", "TFSegformerForImageClassification"), + ("swin", "TFSwinForImageClassification"), + ("vit", "TFViTForImageClassification"), + ] +) + + +TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Image Classification mapping + ("blip", "TFBlipModel"), + ("clip", 
"TFCLIPModel"), + ] +) + + +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Semantic Segmentation mapping + ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"), + ("mobilevit", "TFMobileViTForSemanticSegmentation"), + ("segformer", "TFSegformerForSemanticSegmentation"), + ] +) + +TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("blip", "TFBlipForConditionalGeneration"), + ("vision-encoder-decoder", "TFVisionEncoderDecoderModel"), + ] +) + +TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Masked LM mapping + ("albert", "TFAlbertForMaskedLM"), + ("bert", "TFBertForMaskedLM"), + ("camembert", "TFCamembertForMaskedLM"), + ("convbert", "TFConvBertForMaskedLM"), + ("deberta", "TFDebertaForMaskedLM"), + ("deberta-v2", "TFDebertaV2ForMaskedLM"), + ("distilbert", "TFDistilBertForMaskedLM"), + ("electra", "TFElectraForMaskedLM"), + ("esm", "TFEsmForMaskedLM"), + ("flaubert", "TFFlaubertWithLMHeadModel"), + ("funnel", "TFFunnelForMaskedLM"), + ("layoutlm", "TFLayoutLMForMaskedLM"), + ("longformer", "TFLongformerForMaskedLM"), + ("mobilebert", "TFMobileBertForMaskedLM"), + ("mpnet", "TFMPNetForMaskedLM"), + ("rembert", "TFRemBertForMaskedLM"), + ("roberta", "TFRobertaForMaskedLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), + ("roformer", "TFRoFormerForMaskedLM"), + ("tapas", "TFTapasForMaskedLM"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForMaskedLM"), + ] +) + +TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Seq2Seq Causal LM mapping + ("bart", "TFBartForConditionalGeneration"), + ("blenderbot", "TFBlenderbotForConditionalGeneration"), + ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"), + ("encoder-decoder", "TFEncoderDecoderModel"), + ("led", "TFLEDForConditionalGeneration"), + ("marian", "TFMarianMTModel"), + ("mbart", "TFMBartForConditionalGeneration"), + ("mt5", "TFMT5ForConditionalGeneration"), + ("pegasus", "TFPegasusForConditionalGeneration"), + ("t5", "TFT5ForConditionalGeneration"), + ] +) + +TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), + ("whisper", "TFWhisperForConditionalGeneration"), + ] +) + +TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Sequence Classification mapping + ("albert", "TFAlbertForSequenceClassification"), + ("bart", "TFBartForSequenceClassification"), + ("bert", "TFBertForSequenceClassification"), + ("camembert", "TFCamembertForSequenceClassification"), + ("convbert", "TFConvBertForSequenceClassification"), + ("ctrl", "TFCTRLForSequenceClassification"), + ("deberta", "TFDebertaForSequenceClassification"), + ("deberta-v2", "TFDebertaV2ForSequenceClassification"), + ("distilbert", "TFDistilBertForSequenceClassification"), + ("electra", "TFElectraForSequenceClassification"), + ("esm", "TFEsmForSequenceClassification"), + ("flaubert", "TFFlaubertForSequenceClassification"), + ("funnel", "TFFunnelForSequenceClassification"), + ("gpt-sw3", "TFGPT2ForSequenceClassification"), + ("gpt2", "TFGPT2ForSequenceClassification"), + ("gptj", "TFGPTJForSequenceClassification"), + ("layoutlm", "TFLayoutLMForSequenceClassification"), + ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"), + ("longformer", "TFLongformerForSequenceClassification"), + ("mobilebert", "TFMobileBertForSequenceClassification"), + ("mpnet", "TFMPNetForSequenceClassification"), + ("openai-gpt", 
"TFOpenAIGPTForSequenceClassification"), + ("rembert", "TFRemBertForSequenceClassification"), + ("roberta", "TFRobertaForSequenceClassification"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForSequenceClassification"), + ("roformer", "TFRoFormerForSequenceClassification"), + ("tapas", "TFTapasForSequenceClassification"), + ("transfo-xl", "TFTransfoXLForSequenceClassification"), + ("xlm", "TFXLMForSequenceClassification"), + ("xlm-roberta", "TFXLMRobertaForSequenceClassification"), + ("xlnet", "TFXLNetForSequenceClassification"), + ] +) + +TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Question Answering mapping + ("albert", "TFAlbertForQuestionAnswering"), + ("bert", "TFBertForQuestionAnswering"), + ("camembert", "TFCamembertForQuestionAnswering"), + ("convbert", "TFConvBertForQuestionAnswering"), + ("deberta", "TFDebertaForQuestionAnswering"), + ("deberta-v2", "TFDebertaV2ForQuestionAnswering"), + ("distilbert", "TFDistilBertForQuestionAnswering"), + ("electra", "TFElectraForQuestionAnswering"), + ("flaubert", "TFFlaubertForQuestionAnsweringSimple"), + ("funnel", "TFFunnelForQuestionAnswering"), + ("gptj", "TFGPTJForQuestionAnswering"), + ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"), + ("longformer", "TFLongformerForQuestionAnswering"), + ("mobilebert", "TFMobileBertForQuestionAnswering"), + ("mpnet", "TFMPNetForQuestionAnswering"), + ("rembert", "TFRemBertForQuestionAnswering"), + ("roberta", "TFRobertaForQuestionAnswering"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForQuestionAnswering"), + ("roformer", "TFRoFormerForQuestionAnswering"), + ("xlm", "TFXLMForQuestionAnsweringSimple"), + ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"), + ("xlnet", "TFXLNetForQuestionAnsweringSimple"), + ] +) +TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([("wav2vec2", "TFWav2Vec2ForSequenceClassification")]) + +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "TFLayoutLMForQuestionAnswering"), + ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"), + ] +) + + +TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Table Question Answering mapping + ("tapas", "TFTapasForQuestionAnswering"), + ] +) + +TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Token Classification mapping + ("albert", "TFAlbertForTokenClassification"), + ("bert", "TFBertForTokenClassification"), + ("camembert", "TFCamembertForTokenClassification"), + ("convbert", "TFConvBertForTokenClassification"), + ("deberta", "TFDebertaForTokenClassification"), + ("deberta-v2", "TFDebertaV2ForTokenClassification"), + ("distilbert", "TFDistilBertForTokenClassification"), + ("electra", "TFElectraForTokenClassification"), + ("esm", "TFEsmForTokenClassification"), + ("flaubert", "TFFlaubertForTokenClassification"), + ("funnel", "TFFunnelForTokenClassification"), + ("layoutlm", "TFLayoutLMForTokenClassification"), + ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"), + ("longformer", "TFLongformerForTokenClassification"), + ("mobilebert", "TFMobileBertForTokenClassification"), + ("mpnet", "TFMPNetForTokenClassification"), + ("rembert", "TFRemBertForTokenClassification"), + ("roberta", "TFRobertaForTokenClassification"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForTokenClassification"), + ("roformer", "TFRoFormerForTokenClassification"), + ("xlm", "TFXLMForTokenClassification"), + ("xlm-roberta", "TFXLMRobertaForTokenClassification"), + ("xlnet", 
"TFXLNetForTokenClassification"), + ] +) + +TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( + [ + # Model for Multiple Choice mapping + ("albert", "TFAlbertForMultipleChoice"), + ("bert", "TFBertForMultipleChoice"), + ("camembert", "TFCamembertForMultipleChoice"), + ("convbert", "TFConvBertForMultipleChoice"), + ("deberta-v2", "TFDebertaV2ForMultipleChoice"), + ("distilbert", "TFDistilBertForMultipleChoice"), + ("electra", "TFElectraForMultipleChoice"), + ("flaubert", "TFFlaubertForMultipleChoice"), + ("funnel", "TFFunnelForMultipleChoice"), + ("longformer", "TFLongformerForMultipleChoice"), + ("mobilebert", "TFMobileBertForMultipleChoice"), + ("mpnet", "TFMPNetForMultipleChoice"), + ("rembert", "TFRemBertForMultipleChoice"), + ("roberta", "TFRobertaForMultipleChoice"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMultipleChoice"), + ("roformer", "TFRoFormerForMultipleChoice"), + ("xlm", "TFXLMForMultipleChoice"), + ("xlm-roberta", "TFXLMRobertaForMultipleChoice"), + ("xlnet", "TFXLNetForMultipleChoice"), + ] +) + +TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( + [ + ("bert", "TFBertForNextSentencePrediction"), + ("mobilebert", "TFMobileBertForNextSentencePrediction"), + ] +) +TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict( + [ + ("sam", "TFSamModel"), + ] +) +TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict( + [ + ("albert", "TFAlbertModel"), + ("bert", "TFBertModel"), + ("convbert", "TFConvBertModel"), + ("deberta", "TFDebertaModel"), + ("deberta-v2", "TFDebertaV2Model"), + ("distilbert", "TFDistilBertModel"), + ("electra", "TFElectraModel"), + ("flaubert", "TFFlaubertModel"), + ("longformer", "TFLongformerModel"), + ("mobilebert", "TFMobileBertModel"), + ("mt5", "TFMT5EncoderModel"), + ("rembert", "TFRemBertModel"), + ("roberta", "TFRobertaModel"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"), + ("roformer", "TFRoFormerModel"), + ("t5", "TFT5EncoderModel"), + ("xlm", "TFXLMModel"), + ("xlm-roberta", "TFXLMRobertaModel"), + ] +) + +TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES) +TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES) +TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES) +TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) +TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES +) +TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES +) +TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) +TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES) +TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES +) +TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES +) 
+TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES +) +TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES +) +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) +TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES +) +TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +) +TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES +) +TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES +) +TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES +) + +TF_MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES +) + +TF_MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) + + +class TFAutoModelForMaskGeneration(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING + + +class TFAutoModelForTextEncoding(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING + + +class TFAutoModel(_BaseAutoModelClass): + _model_mapping = TF_MODEL_MAPPING + + +TFAutoModel = auto_class_update(TFAutoModel) + + +class TFAutoModelForAudioClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING + + +TFAutoModelForAudioClassification = auto_class_update( + TFAutoModelForAudioClassification, head_doc="audio classification" +) + + +class TFAutoModelForPreTraining(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING + + +TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining") + + +# Private on purpose, the public class will add the deprecation warnings. 
+class _TFAutoModelWithLMHead(_BaseAutoModelClass): + _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING + + +_TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling") + + +class TFAutoModelForCausalLM(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING + + +TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling") + + +class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING + + +TFAutoModelForMaskedImageModeling = auto_class_update( + TFAutoModelForMaskedImageModeling, head_doc="masked image modeling" +) + + +class TFAutoModelForImageClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING + + +TFAutoModelForImageClassification = auto_class_update( + TFAutoModelForImageClassification, head_doc="image classification" +) + + +class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING + + +TFAutoModelForZeroShotImageClassification = auto_class_update( + TFAutoModelForZeroShotImageClassification, head_doc="zero-shot image classification" +) + + +class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING + + +TFAutoModelForSemanticSegmentation = auto_class_update( + TFAutoModelForSemanticSegmentation, head_doc="semantic segmentation" +) + + +class TFAutoModelForVision2Seq(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING + + +TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc="vision-to-text modeling") + + +class TFAutoModelForMaskedLM(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING + + +TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling") + + +class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING + + +TFAutoModelForSeq2SeqLM = auto_class_update( + TFAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" +) + + +class TFAutoModelForSequenceClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING + + +TFAutoModelForSequenceClassification = auto_class_update( + TFAutoModelForSequenceClassification, head_doc="sequence classification" +) + + +class TFAutoModelForQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering") + + +class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForDocumentQuestionAnswering = auto_class_update( + TFAutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', +) + + +class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForTableQuestionAnswering = auto_class_update( + TFAutoModelForTableQuestionAnswering, + head_doc="table question answering", + checkpoint_for_example="google/tapas-base-finetuned-wtq", +) + + +class TFAutoModelForTokenClassification(_BaseAutoModelClass): + _model_mapping = 
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING + + +TFAutoModelForTokenClassification = auto_class_update( + TFAutoModelForTokenClassification, head_doc="token classification" +) + + +class TFAutoModelForMultipleChoice(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING + + +TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice") + + +class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING + + +TFAutoModelForNextSentencePrediction = auto_class_update( + TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction" +) + + +class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING + + +TFAutoModelForSpeechSeq2Seq = auto_class_update( + TFAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" +) + + +class TFAutoModelWithLMHead(_TFAutoModelWithLMHead): + @classmethod + def from_config(cls, config): + warnings.warn( + "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use" + " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models" + " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_config(config) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + warnings.warn( + "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use" + " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models" + " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/modified/models/auto/processing_auto.py b/modified/models/auto/processing_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..93dc6ab6050bb932c0ab2c2cb90b238e7437696b --- /dev/null +++ b/modified/models/auto/processing_auto.py @@ -0,0 +1,340 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" AutoProcessor class.""" +import importlib +import inspect +import json +import os +import warnings +from collections import OrderedDict + +# Build the list of all feature extractors +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...feature_extraction_utils import FeatureExtractionMixin +from ...image_processing_utils import ImageProcessingMixin +from ...tokenization_utils import TOKENIZER_CONFIG_FILE +from ...utils import FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + model_type_to_module_name, + replace_list_option_in_docstrings, +) +from .feature_extraction_auto import AutoFeatureExtractor +from .image_processing_auto import AutoImageProcessor +from .tokenization_auto import AutoTokenizer + + +logger = logging.get_logger(__name__) + +PROCESSOR_MAPPING_NAMES = OrderedDict( + [ + ("align", "AlignProcessor"), + ("altclip", "AltCLIPProcessor"), + ("bark", "BarkProcessor"), + ("blip", "BlipProcessor"), + ("blip-2", "Blip2Processor"), + ("bridgetower", "BridgeTowerProcessor"), + ("chinese_clip", "ChineseCLIPProcessor"), + ("clap", "ClapProcessor"), + ("clip", "CLIPProcessor"), + ("clipseg", "CLIPSegProcessor"), + ("clvp", "ClvpProcessor"), + ("flava", "FlavaProcessor"), + ("fuyu", "FuyuProcessor"), + ("git", "GitProcessor"), + ("groupvit", "CLIPProcessor"), + ("hubert", "Wav2Vec2Processor"), + ("idefics", "IdeficsProcessor"), + ("instructblip", "InstructBlipProcessor"), + ("kosmos-2", "Kosmos2Processor"), + ("layoutlmv2", "LayoutLMv2Processor"), + ("layoutlmv3", "LayoutLMv3Processor"), + ("llava", "LlavaProcessor"), + ("markuplm", "MarkupLMProcessor"), + ("mctct", "MCTCTProcessor"), + ("mgp-str", "MgpstrProcessor"), + ("oneformer", "OneFormerProcessor"), + ("owlv2", "Owlv2Processor"), + ("owlvit", "OwlViTProcessor"), + ("pix2struct", "Pix2StructProcessor"), + ("pop2piano", "Pop2PianoProcessor"), + ("sam", "SamProcessor"), + ("seamless_m4t", "SeamlessM4TProcessor"), + ("sew", "Wav2Vec2Processor"), + ("sew-d", "Wav2Vec2Processor"), + ("speech_to_text", "Speech2TextProcessor"), + ("speech_to_text_2", "Speech2Text2Processor"), + ("speecht5", "SpeechT5Processor"), + ("trocr", "TrOCRProcessor"), + ("tvlt", "TvltProcessor"), + ("tvp", "TvpProcessor"), + ("unispeech", "Wav2Vec2Processor"), + ("unispeech-sat", "Wav2Vec2Processor"), + ("vilt", "ViltProcessor"), + ("vipllava", "LlavaProcessor"), + ("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"), + ("wav2vec2", "Wav2Vec2Processor"), + ("wav2vec2-conformer", "Wav2Vec2Processor"), + ("wavlm", "Wav2Vec2Processor"), + ("whisper", "WhisperProcessor"), + ("xclip", "XCLIPProcessor"), + ] +) + +PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES) + + +def processor_class_from_name(class_name: str): + for module_name, processors in PROCESSOR_MAPPING_NAMES.items(): + if class_name in processors: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for processor in PROCESSOR_MAPPING._extra_content.values(): + if getattr(processor, "__name__", None) == class_name: + return processor + + # We did not fine the class, but maybe it's because a dep is missing. 
In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +class AutoProcessor: + r""" + This is a generic processor class that will be instantiated as one of the processor classes of the library when + created with the [`AutoProcessor.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoProcessor is designed to be instantiated " + "using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the processor classes of the library from a pretrained model vocabulary. + + The processor class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible): + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or + namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a processor files saved using the `save_pretrained()` method, + e.g., `./my_model_directory/`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model feature extractor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the feature extractor files and override the cached versions + if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final feature extractor object. If `True`, then this + functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of + `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. 
+ trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are feature extractor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + + + Passing `token=True` is required when you want to use a private model. + + + + Examples: + + ```python + >>> from transformers import AutoProcessor + + >>> # Download processor from huggingface.co and cache. + >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") + + >>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*) + >>> # processor = AutoProcessor.from_pretrained("./test/saved_model/") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + + processor_class = None + processor_auto_map = None + + # First, let's see if we have a preprocessor config. + # Filter the kwargs for `get_file_from_repo`. 
+ get_file_from_repo_kwargs = { + key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs + } + # Let's start by checking whether the processor class is saved in an image processor + preprocessor_config_file = get_file_from_repo( + pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs + ) + if preprocessor_config_file is not None: + config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + # If not found, let's check whether the processor class is saved in a feature extractor config + if preprocessor_config_file is not None and processor_class is None: + config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + if processor_class is None: + # Next, let's check whether the processor class is saved in a tokenizer + tokenizer_config_file = get_file_from_repo( + pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs + ) + if tokenizer_config_file is not None: + with open(tokenizer_config_file, encoding="utf-8") as reader: + config_dict = json.load(reader) + + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + if processor_class is None: + # Otherwise, load config, if it can be loaded. + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + + # And check if the config contains the processor class. + processor_class = getattr(config, "processor_class", None) + if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map: + processor_auto_map = config.auto_map["AutoProcessor"] + + if processor_class is not None: + processor_class = processor_class_from_name(processor_class) + + has_remote_code = processor_auto_map is not None + has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + processor_class = get_class_from_dynamic_module( + processor_auto_map, pretrained_model_name_or_path, **kwargs + ) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + processor_class.register_for_auto_class() + return processor_class.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + elif processor_class is not None: + return processor_class.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + # Last try: we use the PROCESSOR_MAPPING. + elif type(config) in PROCESSOR_MAPPING: + return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs) + + # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a + # tokenizer. 
+ try: + return AutoTokenizer.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + except Exception: + try: + return AutoImageProcessor.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + except Exception: + pass + + try: + return AutoFeatureExtractor.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + except Exception: + pass + + raise ValueError( + f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a " + "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains " + "the files of at least one of those processing classes." + ) + + @staticmethod + def register(config_class, processor_class, exist_ok=False): + """ + Register a new processor for this class. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + processor_class ([`FeatureExtractorMixin`]): The processor to register. + """ + PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok) diff --git a/modified/models/auto/tokenization_auto.py b/modified/models/auto/tokenization_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..9e4066de99a5f9896691b3ba09828ed819336c17 --- /dev/null +++ b/modified/models/auto/tokenization_auto.py @@ -0,0 +1,862 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Auto Tokenizer class.""" + +import importlib +import json +import os +import warnings +from collections import OrderedDict +from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union + +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE +from ...utils import cached_file, extract_commit_hash, is_sentencepiece_available, is_tokenizers_available, logging +from ..encoder_decoder import EncoderDecoderConfig +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + config_class_to_model_type, + model_type_to_module_name, + replace_list_option_in_docstrings, +) + + +if is_tokenizers_available(): + from ...tokenization_utils_fast import PreTrainedTokenizerFast +else: + PreTrainedTokenizerFast = None + + +logger = logging.get_logger(__name__) + +if TYPE_CHECKING: + # This significantly improves completion suggestion performance when + # the transformers package is used with Microsoft's Pylance language server. 
+ TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict() +else: + TOKENIZER_MAPPING_NAMES = OrderedDict( + [ + ( + "albert", + ( + "AlbertTokenizer" if is_sentencepiece_available() else None, + "AlbertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("align", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("bark", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("bart", ("BartTokenizer", "BartTokenizerFast")), + ( + "barthez", + ( + "BarthezTokenizer" if is_sentencepiece_available() else None, + "BarthezTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("bartpho", ("BartphoTokenizer", None)), + ("bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("bert-generation", ("BertGenerationTokenizer" if is_sentencepiece_available() else None, None)), + ("bert-japanese", ("BertJapaneseTokenizer", None)), + ("bertweet", ("BertweetTokenizer", None)), + ( + "big_bird", + ( + "BigBirdTokenizer" if is_sentencepiece_available() else None, + "BigBirdTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("bigbird_pegasus", ("PegasusTokenizer", "PegasusTokenizerFast" if is_tokenizers_available() else None)), + ("biogpt", ("BioGptTokenizer", None)), + ("blenderbot", ("BlenderbotTokenizer", "BlenderbotTokenizerFast")), + ("blenderbot-small", ("BlenderbotSmallTokenizer", None)), + ("blip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("blip-2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("bloom", (None, "BloomTokenizerFast" if is_tokenizers_available() else None)), + ("bridgetower", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("bros", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("byt5", ("ByT5Tokenizer", None)), + ( + "camembert", + ( + "CamembertTokenizer" if is_sentencepiece_available() else None, + "CamembertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("canine", ("CanineTokenizer", None)), + ("chinese_clip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "clap", + ( + "RobertaTokenizer", + "RobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "clip", + ( + "CLIPTokenizer", + "CLIPTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "clipseg", + ( + "CLIPTokenizer", + "CLIPTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("clvp", ("ClvpTokenizer", None)), + ( + "code_llama", + ( + "CodeLlamaTokenizer" if is_sentencepiece_available() else None, + "CodeLlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("codegen", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)), + ("convbert", ("ConvBertTokenizer", "ConvBertTokenizerFast" if is_tokenizers_available() else None)), + ( + "cpm", + ( + "CpmTokenizer" if is_sentencepiece_available() else None, + "CpmTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("cpmant", ("CpmAntTokenizer", None)), + ("ctrl", ("CTRLTokenizer", None)), + ("data2vec-audio", ("Wav2Vec2CTCTokenizer", None)), + ("data2vec-text", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("deberta", ("DebertaTokenizer", "DebertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "deberta-v2", + ( + "DebertaV2Tokenizer" 
if is_sentencepiece_available() else None, + "DebertaV2TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("distilbert", ("DistilBertTokenizer", "DistilBertTokenizerFast" if is_tokenizers_available() else None)), + ( + "dpr", + ( + "DPRQuestionEncoderTokenizer", + "DPRQuestionEncoderTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)), + ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("ernie_m", ("ErnieMTokenizer" if is_sentencepiece_available() else None, None)), + ("esm", ("EsmTokenizer", None)), + ("falcon", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)), + ("flaubert", ("FlaubertTokenizer", None)), + ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)), + ("fsmt", ("FSMTTokenizer", None)), + ("funnel", ("FunnelTokenizer", "FunnelTokenizerFast" if is_tokenizers_available() else None)), + ("git", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("gpt-sw3", ("GPTSw3Tokenizer" if is_sentencepiece_available() else None, None)), + ("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gpt_bigcode", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neox", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neox_japanese", ("GPTNeoXJapaneseTokenizer", None)), + ("gptj", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gptsan-japanese", ("GPTSanJapaneseTokenizer", None)), + ("groupvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)), + ("hubert", ("Wav2Vec2CTCTokenizer", None)), + ("ibert", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("idefics", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("instructblip", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("jukebox", ("JukeboxTokenizer", None)), + ( + "kosmos-2", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)), + ("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)), + ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)), + ("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)), + ("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)), + ("lilt", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)), + ( + "llama", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("llava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)), + ( + "longt5", + ( + "T5Tokenizer" if 
is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("luke", ("LukeTokenizer", None)), + ("lxmert", ("LxmertTokenizer", "LxmertTokenizerFast" if is_tokenizers_available() else None)), + ("m2m_100", ("M2M100Tokenizer" if is_sentencepiece_available() else None, None)), + ("marian", ("MarianTokenizer" if is_sentencepiece_available() else None, None)), + ( + "mbart", + ( + "MBartTokenizer" if is_sentencepiece_available() else None, + "MBartTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "mbart50", + ( + "MBart50Tokenizer" if is_sentencepiece_available() else None, + "MBart50TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("mega", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("megatron-bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("mgp-str", ("MgpstrTokenizer", None)), + ( + "mistral", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "mixtral", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("mluke", ("MLukeTokenizer" if is_sentencepiece_available() else None, None)), + ("mobilebert", ("MobileBertTokenizer", "MobileBertTokenizerFast" if is_tokenizers_available() else None)), + ("mpnet", ("MPNetTokenizer", "MPNetTokenizerFast" if is_tokenizers_available() else None)), + ("mpt", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("mra", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "mt5", + ( + "MT5Tokenizer" if is_sentencepiece_available() else None, + "MT5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("musicgen", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), + ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)), + ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "nllb", + ( + "NllbTokenizer" if is_sentencepiece_available() else None, + "NllbTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "nllb-moe", + ( + "NllbTokenizer" if is_sentencepiece_available() else None, + "NllbTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "nystromformer", + ( + "AlbertTokenizer" if is_sentencepiece_available() else None, + "AlbertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("oneformer", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ("openai-gpt", ("OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" if is_tokenizers_available() else None)), + ("opt", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("owlv2", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ("owlvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ( + "pegasus", + ( + "PegasusTokenizer" if is_sentencepiece_available() else None, + "PegasusTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "pegasus_x", + ( + "PegasusTokenizer" if is_sentencepiece_available() else None, + "PegasusTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "perceiver", + ( + "PerceiverTokenizer", + None, + ), + ), + ( + "persimmon", + ( + "LlamaTokenizer" if 
is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("phi", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)), + ("phobert", ("PhobertTokenizer", None)), + ("pix2struct", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), + ("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)), + ("prophetnet", ("ProphetNetTokenizer", None)), + ("qdqbert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("rag", ("RagTokenizer", None)), + ("realm", ("RealmTokenizer", "RealmTokenizerFast" if is_tokenizers_available() else None)), + ( + "reformer", + ( + "ReformerTokenizer" if is_sentencepiece_available() else None, + "ReformerTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "rembert", + ( + "RemBertTokenizer" if is_sentencepiece_available() else None, + "RemBertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("retribert", ("RetriBertTokenizer", "RetriBertTokenizerFast" if is_tokenizers_available() else None)), + ("roberta", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "roberta-prelayernorm", + ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None), + ), + ("roc_bert", ("RoCBertTokenizer", None)), + ("roformer", ("RoFormerTokenizer", "RoFormerTokenizerFast" if is_tokenizers_available() else None)), + ("rwkv", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ( + "seamless_m4t", + ( + "SeamlessM4TTokenizer" if is_sentencepiece_available() else None, + "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "seamless_m4t_v2", + ( + "SeamlessM4TTokenizer" if is_sentencepiece_available() else None, + "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("speech_to_text", ("Speech2TextTokenizer" if is_sentencepiece_available() else None, None)), + ("speech_to_text_2", ("Speech2Text2Tokenizer", None)), + ("speecht5", ("SpeechT5Tokenizer" if is_sentencepiece_available() else None, None)), + ("splinter", ("SplinterTokenizer", "SplinterTokenizerFast")), + ( + "squeezebert", + ("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None), + ), + ( + "switch_transformers", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "t5", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("tapas", ("TapasTokenizer", None)), + ("tapex", ("TapexTokenizer", None)), + ("transfo-xl", ("TransfoXLTokenizer", None)), + ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "umt5", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("vilt", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("vipllava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("visual_bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("vits", ("VitsTokenizer", None)), + ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)), + ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)), + ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)), + ("whisper", ("WhisperTokenizer", 
"WhisperTokenizerFast" if is_tokenizers_available() else None)), + ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ( + "xglm", + ( + "XGLMTokenizer" if is_sentencepiece_available() else None, + "XGLMTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("xlm", ("XLMTokenizer", None)), + ("xlm-prophetnet", ("XLMProphetNetTokenizer" if is_sentencepiece_available() else None, None)), + ( + "xlm-roberta", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "xlm-roberta-xl", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "xlnet", + ( + "XLNetTokenizer" if is_sentencepiece_available() else None, + "XLNetTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "xmod", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "yoso", + ( + "AlbertTokenizer" if is_sentencepiece_available() else None, + "AlbertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ] + ) + +TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES) + +CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()} + + +def tokenizer_class_from_name(class_name: str): + if class_name == "PreTrainedTokenizerFast": + return PreTrainedTokenizerFast + + for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items(): + if class_name in tokenizers: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for config, tokenizers in TOKENIZER_MAPPING._extra_content.items(): + for tokenizer in tokenizers: + if getattr(tokenizer, "__name__", None) == class_name: + return tokenizer + + # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +def get_tokenizer_config( + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + subfolder: str = "", + **kwargs, +): + """ + Loads the tokenizer configuration from a pretrained model tokenizer configuration. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. 
+ + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the tokenizer config is located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Dict`: The configuration of the tokenizer. + + Examples: + + ```python + # Download configuration from huggingface.co and cache. + tokenizer_config = get_tokenizer_config("bert-base-uncased") + # This model does not have a tokenizer config so the result will be an empty dict. + tokenizer_config = get_tokenizer_config("xlm-roberta-base") + + # Save a pretrained tokenizer locally and you can reload its config + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + tokenizer.save_pretrained("tokenizer-test") + tokenizer_config = get_tokenizer_config("tokenizer-test") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.") + token = use_auth_token + + commit_hash = kwargs.get("_commit_hash", None) + resolved_config_file = cached_file( + pretrained_model_name_or_path, + TOKENIZER_CONFIG_FILE, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, + ) + if resolved_config_file is None: + logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.") + return {} + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + + with open(resolved_config_file, encoding="utf-8") as reader: + result = json.load(reader) + result["_commit_hash"] = commit_hash + return result + + +class AutoTokenizer: + r""" + This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when + created with the [`AutoTokenizer.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoTokenizer is designed to be instantiated " + "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): + r""" + Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary. + + The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by + falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved + using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + - A path or url to a single saved vocabulary file if and only if the tokenizer only requires a + single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not + applicable to all derived classes) + inputs (additional positional arguments, *optional*): + Will be passed along to the Tokenizer `__init__()` method. + config ([`PretrainedConfig`], *optional*) + The configuration object used to determine the tokenizer class to instantiate. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download the model weights and configuration files and override the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + subfolder (`str`, *optional*): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for + facebook/rag-token-base), specify it here. + use_fast (`bool`, *optional*, defaults to `True`): + Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for + a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer + is returned instead. + tokenizer_type (`str`, *optional*): + Tokenizer type to be loaded. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (additional keyword arguments, *optional*): + Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like + `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, + `additional_special_tokens`. See parameters in the `__init__()` for more details. + + Examples: + + ```python + >>> from transformers import AutoTokenizer + + >>> # Download vocabulary from huggingface.co and cache. + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + + >>> # Download vocabulary from huggingface.co (user-uploaded) and cache. + >>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased") + + >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) + >>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/") + + >>> # Download vocabulary from huggingface.co and define model-specific arguments + >>> tokenizer = AutoTokenizer.from_pretrained("roberta-base", add_prefix_space=True) + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + kwargs["_from_auto"] = True + + use_fast = kwargs.pop("use_fast", True) + tokenizer_type = kwargs.pop("tokenizer_type", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + + # First, let's see whether the tokenizer_type is passed so that we can leverage it + if tokenizer_type is not None: + tokenizer_class = None + tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None) + + if tokenizer_class_tuple is None: + raise ValueError( + f"Passed `tokenizer_type` {tokenizer_type} does not exist. 
`tokenizer_type` should be one of " + f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}." + ) + + tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple + + if use_fast: + if tokenizer_fast_class_name is not None: + tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name) + else: + logger.warning( + "`use_fast` is set to `True` but the tokenizer class does not have a fast version. " + " Falling back to the slow version." + ) + if tokenizer_class is None: + tokenizer_class = tokenizer_class_from_name(tokenizer_class_name) + + if tokenizer_class is None: + raise ValueError(f"Tokenizer class {tokenizer_class_name} is not currently imported.") + + return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + + # Next, let's try to use the tokenizer_config file to get the tokenizer class. + tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs) + if "_commit_hash" in tokenizer_config: + kwargs["_commit_hash"] = tokenizer_config["_commit_hash"] + config_tokenizer_class = tokenizer_config.get("tokenizer_class") + tokenizer_auto_map = None + if "auto_map" in tokenizer_config: + if isinstance(tokenizer_config["auto_map"], (tuple, list)): + # Legacy format for dynamic tokenizers + tokenizer_auto_map = tokenizer_config["auto_map"] + else: + tokenizer_auto_map = tokenizer_config["auto_map"].get("AutoTokenizer", None) + + # If that did not work, let's try to use the config. + if config_tokenizer_class is None: + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + config_tokenizer_class = config.tokenizer_class + if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map: + tokenizer_auto_map = config.auto_map["AutoTokenizer"] + + has_remote_code = tokenizer_auto_map is not None + has_local_code = config_tokenizer_class is not None or type(config) in TOKENIZER_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + if use_fast and tokenizer_auto_map[1] is not None: + class_ref = tokenizer_auto_map[1] + else: + class_ref = tokenizer_auto_map[0] + tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + tokenizer_class.register_for_auto_class() + return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + elif config_tokenizer_class is not None: + tokenizer_class = None + if use_fast and not config_tokenizer_class.endswith("Fast"): + tokenizer_class_candidate = f"{config_tokenizer_class}Fast" + tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) + if tokenizer_class is None: + tokenizer_class_candidate = config_tokenizer_class + tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) + if tokenizer_class is None: + raise ValueError( + f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported." + ) + return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + + # Otherwise we have to be creative. 
+ # if model is an encoder decoder, the encoder tokenizer class is used by default + if isinstance(config, EncoderDecoderConfig): + if type(config.decoder) is not type(config.encoder): # noqa: E721 + logger.warning( + f"The encoder model config class: {config.encoder.__class__} is different from the decoder model " + f"config class: {config.decoder.__class__}. It is not recommended to use the " + "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder " + "specific tokenizer classes." + ) + config = config.encoder + + model_type = config_class_to_model_type(type(config).__name__) + if model_type is not None: + tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)] + if tokenizer_class_fast and (use_fast or tokenizer_class_py is None): + return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + else: + if tokenizer_class_py is not None: + return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + else: + raise ValueError( + "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed " + "in order to use this tokenizer." + ) + + raise ValueError( + f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n" + f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}." + ) + + def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False): + """ + Register a new tokenizer in this mapping. + + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + slow_tokenizer_class ([`PretrainedTokenizer`], *optional*): + The slow tokenizer to register. + fast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*): + The fast tokenizer to register. + """ + if slow_tokenizer_class is None and fast_tokenizer_class is None: + raise ValueError("You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class") + if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast): + raise ValueError("You passed a fast tokenizer in the `slow_tokenizer_class`.") + if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer): + raise ValueError("You passed a slow tokenizer in the `fast_tokenizer_class`.") + + if ( + slow_tokenizer_class is not None + and fast_tokenizer_class is not None + and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) + and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class + ): + raise ValueError( + "The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not " + "consistent with the slow tokenizer class you passed (fast tokenizer has " + f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those " + "so they match!" + ) + + # Avoid resetting a set slow/fast tokenizer if we are passing just the other ones. 
+ if config_class in TOKENIZER_MAPPING._extra_content: + existing_slow, existing_fast = TOKENIZER_MAPPING[config_class] + if slow_tokenizer_class is None: + slow_tokenizer_class = existing_slow + if fast_tokenizer_class is None: + fast_tokenizer_class = existing_fast + + TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok) diff --git a/modified/models/phi/__init__.py b/modified/models/phi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba79ac81a6b9e55c3881c27d7772109113b9d803 --- /dev/null +++ b/modified/models/phi/__init__.py @@ -0,0 +1,69 @@ +# Copyright 2023 Microsoft and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_sentencepiece_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_phi"] = [ + "PHI_PRETRAINED_MODEL_ARCHIVE_LIST", + "PhiPreTrainedModel", + "PhiModel", + "PhiForCausalLM", + "PhiForSequenceClassification", + "PhiForTokenClassification", + ] + + +if TYPE_CHECKING: + from .configuration_phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_phi import ( + PHI_PRETRAINED_MODEL_ARCHIVE_LIST, + PhiForCausalLM, + PhiForSequenceClassification, + PhiForTokenClassification, + PhiModel, + PhiPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/modified/models/phi/__pycache__/__init__.cpython-39.pyc b/modified/models/phi/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8199ae09729c2c4f4130c05903c38ba43b947ff Binary files /dev/null and b/modified/models/phi/__pycache__/__init__.cpython-39.pyc differ diff --git a/modified/models/phi/__pycache__/configuration_phi.cpython-39.pyc b/modified/models/phi/__pycache__/configuration_phi.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6700c76d36d8b5b0e1d1d8beefb75bd8c8c89fae Binary files /dev/null and b/modified/models/phi/__pycache__/configuration_phi.cpython-39.pyc differ diff --git a/modified/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-39.pyc b/modified/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28b9b18e699baa7c46779624c63c359cb4e06a99 Binary files /dev/null and b/modified/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-39.pyc differ diff --git 
a/modified/models/phi/__pycache__/modeling_phi.cpython-39.pyc b/modified/models/phi/__pycache__/modeling_phi.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6517aa8e3de272adf64e0062d955cb8eba93bd86 Binary files /dev/null and b/modified/models/phi/__pycache__/modeling_phi.cpython-39.pyc differ diff --git a/modified/models/phi/configuration_phi.py b/modified/models/phi/configuration_phi.py new file mode 100644 index 0000000000000000000000000000000000000000..5025ef798ff95d5067e0882c1fe35e7c4dff38fa --- /dev/null +++ b/modified/models/phi/configuration_phi.py @@ -0,0 +1,180 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Phi model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +PHI_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "susnato/phi-1_dev": "https://huggingface.co/susnato/phi-1_dev/resolve/main/config.json", + "susnato/phi-1_5_dev": "https://huggingface.co/susnato/phi-1_5_dev/resolve/main/config.json", +} + + +class PhiConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate an Phi + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Phi + [susnato/phi-1_dev](https://huggingface.co/susnato/phi-1_dev). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 51200): + Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`PhiModel`]. + hidden_size (`int`, *optional*, defaults to 2048): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 8192): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + resid_pdrop (`float`, *optional*, defaults to 0.0): + Dropout probability for mlp outputs. + embd_pdrop (`int`, *optional*, defaults to 0.0): + The dropout ratio for the embeddings. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio after computing the attention scores. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 supports up to 2048 + tokens. 
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
+            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+            these scaling strategies behave:
+            https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
+            is an experimental feature, subject to breaking API changes in future versions.
+        partial_rotary_factor (`float`, *optional*, defaults to 0.5):
+            Percentage of the query and keys which will have rotary embedding.
+        qk_layernorm (`bool`, *optional*, defaults to `False`):
+            Whether or not to normalize the queries and keys after projecting the hidden states.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Denotes the beginning-of-sequence token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            Denotes the end-of-sequence token id.
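Complementing the canonical example below, a small sketch of the `rope_scaling` field documented above. It assumes the stock `transformers` import path; in this tree the class lives in `modified/models/phi/configuration_phi.py`, and the factor of 2.0 is an arbitrary illustration:

```python
from transformers import PhiConfig

# Linear RoPE scaling: positions are divided by `factor` before the cos/sin cache is built,
# stretching the usable context without touching `max_position_embeddings`.
config = PhiConfig(rope_scaling={"type": "linear", "factor": 2.0})

# `_rope_scaling_validation` (defined further down) rejects malformed dicts:
try:
    PhiConfig(rope_scaling={"type": "linear"})  # missing the "factor" field
except ValueError as err:
    print(err)
```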
+ + Example: + + ```python + >>> from transformers import PhiModel, PhiConfig + + >>> # Initializing a Phi-1 style configuration + >>> configuration = PhiConfig.from_pretrained("susnato/phi-1_dev") + + >>> # Initializing a model from the configuration + >>> model = PhiModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "phi" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=51200, + hidden_size=2048, + intermediate_size=8192, + num_hidden_layers=24, + num_attention_heads=32, + resid_pdrop=0.0, + embd_pdrop=0.0, + attention_dropout=0.0, + hidden_act="gelu_new", + max_position_embeddings=2048, + initializer_range=0.02, + layer_norm_eps=1e-5, + use_cache=True, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + partial_rotary_factor=0.5, + qk_layernorm=False, + bos_token_id=1, + eos_token_id=2, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attention_dropout = attention_dropout + self.hidden_act = hidden_act + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.partial_rotary_factor = partial_rotary_factor + self.qk_layernorm = qk_layernorm + self._rope_scaling_validation() + + super().__init__( + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. + """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, " + f"got {self.rope_scaling}" + ) + rope_scaling_type = self.rope_scaling.get("type", None) + rope_scaling_factor = self.rope_scaling.get("factor", None) + if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + raise ValueError( + f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" + ) + if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: + raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") diff --git a/modified/models/phi/convert_phi_weights_to_hf.py b/modified/models/phi/convert_phi_weights_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..36d6eeb3e635a5de5c3fce0e0c8b3a630d3572eb --- /dev/null +++ b/modified/models/phi/convert_phi_weights_to_hf.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Weights conversion script for Phi + +This script downloads both Phi-1 and Phi-1.5 checkpoints to "checkpoint_path" and then converts the weights to +HugfgingFace model's format and saves them in "pytorch_dump_folder_path". +""" + +import argparse +import gc +import os + +import torch +from huggingface_hub import hf_hub_download + +from transformers import PhiConfig, PhiForCausalLM + + +_MODELS = { + "microsoft/phi-1": "https://huggingface.co/microsoft/phi-1/blob/main/pytorch_model.bin", + "microsoft/phi-1_5": "https://huggingface.co/microsoft/phi-1_5/blob/main/pytorch_model.bin", +} + + +PHI_MAPPING = { + "layers.0.wte.weight": "model.embed_tokens.weight", + "layers.25.linear.bias": "lm_head.bias", + "layers.25.linear.weight": "lm_head.weight", + "layers.25.ln.bias": "model.final_layernorm.bias", + "layers.25.ln.weight": "model.final_layernorm.weight", + "layers": "model.layers", + "ln": "input_layernorm", + "mixer": "self_attn", + "Wqkv": "query_key_value", + "out_proj": "dense", +} + + +def convert_weights(original_weights, mapping, config): + converted_weights = {} + original_weights_keys = sorted(original_weights.keys()) + + # we change names (1-24) -> layers(0-23) for Phi model layers + range_change = { + f"layers.{k}.": f"layers.{v}." + for k, v in zip(range(1, config.num_hidden_layers + 1), range(0, config.num_hidden_layers)) + } + + mapping.update(**range_change) + + for original_weights_key in original_weights_keys: + new_key = original_weights_key + + if "rotary_emb" in new_key: + continue + + if "Wqkv" in new_key: + if "weight" in new_key: + weight = original_weights[new_key] + weights_shape = weight.shape + weight = ( + weight.view(3, config.num_attention_heads, -1, config.hidden_size) + .transpose(0, 1) + .reshape(*weights_shape) + ) + original_weights[new_key] = weight + elif "bias" in new_key: + bias = original_weights[new_key] + bias_shape = bias.shape + bias = bias.view(3, config.num_attention_heads, -1).transpose(0, 1).reshape(*bias_shape) + original_weights[new_key] = bias + + for k, v in mapping.items(): + if k in new_key: + new_key = new_key.replace(k, v) + + converted_weights[new_key] = original_weights.pop(original_weights_key) + + return converted_weights + + +def _download(url: str, root: str): + repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}" + filename = f"{url.split('/')[-1]}" + hf_hub_download( + repo_id=repo_id, + filename=filename, + force_filename=root, + local_dir_use_symlinks=False, + ) + + +def convert_phi_weights(checkpoint_path, pytorch_dump_folder_path, use_cuda, save_weights_directly): + device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" + for each_model_name, each_model_url in _MODELS.items(): + converted_checkpoint = {} + + model_path = os.path.join(checkpoint_path, each_model_name + "_" + each_model_url.split("/")[-1]) + if not os.path.exists(model_path): + print(f"\n{each_model_name} was not found! 
Downloading it to {model_path}") + _download(url=each_model_url, root=model_path) + model_checkpoint = torch.load(model_path, map_location=device) + model_type = each_model_name.split("/")[1] # phi-1 or phi-1_5 + config = PhiConfig.from_pretrained(f"susnato/{model_type}_dev") + + # Converting the weights + converted_checkpoint.update(**convert_weights(model_checkpoint, PHI_MAPPING, config)) + + # Save either the whole model or the converted weights + if save_weights_directly: + save_weights_path = os.path.join( + pytorch_dump_folder_path, each_model_name.split("/")[-1] + "_" + each_model_url.split("/")[-1] + ) + torch.save(converted_checkpoint, save_weights_path) + print(f"Model weights saved at {save_weights_path}!") + + else: + model = PhiForCausalLM(config).to(device) + model.load_state_dict(converted_checkpoint, strict=True) + save_model_path = os.path.join(pytorch_dump_folder_path, model_type) + model.save_pretrained(save_model_path) + print(f"Model saved at {save_model_path}!") + + # release GPU memory for the 2nd model if cuda was used. + del config, model + + # release GPU memory for the 2nd model if cuda was used. + del model_checkpoint, converted_checkpoint + if use_cuda: + torch.cuda.empty_cache() + gc.collect() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # # Required parameters + parser.add_argument( + "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)" + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + help="Path to the output PyTorch model. (Please enter full path)", + ) + parser.add_argument( + "--use_cuda", + default=False, + type=bool, + help="Whether to load the weights on GPU during conversion or not, False by default", + ) + parser.add_argument( + "--save_weights_directly", + default=True, + type=bool, + help="Whether to save the weights directly after conversion or load the weight to the Phi model and then save " + "the Phi model along with weights. True by default", + ) + + args = parser.parse_args() + convert_phi_weights(args.checkpoint_path, args.pytorch_dump_folder_path, args.use_cuda, args.save_weights_directly) diff --git a/modified/models/phi/modeling_phi.py b/modified/models/phi/modeling_phi.py new file mode 100644 index 0000000000000000000000000000000000000000..afb3814c45a39056cf1f141c9a3beb1b61a850d7 --- /dev/null +++ b/modified/models/phi/modeling_phi.py @@ -0,0 +1,1383 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
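A hedged sketch of driving the converter above programmatically instead of through its argparse entry point. The directory paths are placeholders, and the import assumes the script's folder is on `sys.path`:

```python
from convert_phi_weights_to_hf import convert_phi_weights  # adjust the import to where this script lives

# Downloads microsoft/phi-1 and microsoft/phi-1_5 into checkpoint_path (if not already present),
# remaps the fused Wqkv/layer names via PHI_MAPPING, and writes the converted weights out.
convert_phi_weights(
    checkpoint_path="/tmp/phi_checkpoints",         # placeholder directory for the raw .bin files
    pytorch_dump_folder_path="/tmp/phi_converted",  # placeholder output directory
    use_cuda=False,
    save_weights_directly=True,  # save raw state dicts rather than full PhiForCausalLM dumps
)
```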
+ +""" PyTorch Phi model.""" + + +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...cache_utils import Cache, DynamicCache +from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_phi import PhiConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "susnato/phi-1_dev" +_CONFIG_FOR_DOC = "PhiConfig" + +PHI_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "susnato/phi-1_dev", + "susnato/phi-1_5_dev", + # See all Phi models at https://huggingface.co/models?filter=phi +] + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Phi +class PhiRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. 
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Phi +class PhiLinearScalingRotaryEmbedding(PhiRotaryEmbedding): + """PhiRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Phi +class PhiDynamicNTKScalingRotaryEmbedding(PhiRotaryEmbedding): + """PhiRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
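A shape-level sketch of the rotary helpers documented above, assuming `PhiRotaryEmbedding` and `apply_rotary_pos_emb` are in scope as defined in this file; the dimensions are arbitrary illustrations:

```python
import torch

bsz, num_heads, seq_len, head_dim = 1, 4, 6, 16

rotary = PhiRotaryEmbedding(dim=head_dim, max_position_embeddings=32)
q = torch.randn(bsz, num_heads, seq_len, head_dim)
k = torch.randn(bsz, num_heads, seq_len, head_dim)

cos, sin = rotary(q, seq_len=seq_len)              # each of shape [seq_len, head_dim]
position_ids = torch.arange(seq_len).unsqueeze(0)  # [1, seq_len]

# With the default unsqueeze_dim=1, cos/sin broadcast over the heads axis
# of the [bsz, heads, seq_len, head_dim] query/key layout.
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert q_rot.shape == q.shape and k_rot.shape == k.shape
```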
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Phi +class PhiMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.persimmon.modeling_persimmon.PersimmonAttention with Persimmon->Phi,persimmon->phi +class PhiAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: PhiConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " + "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.partial_rotary_factor = config.partial_rotary_factor + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) + self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True) + # self.qk_layernorm = config.qk_layernorm + + # if self.qk_layernorm: + self.q_layernorm = nn.LayerNorm( + config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True + ) + self.k_layernorm = nn.LayerNorm( + config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True + ) + self.attention_dropout = nn.Dropout(config.attention_dropout) + self._init_rope() + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = PhiRotaryEmbedding( + int(self.partial_rotary_factor * self.head_dim), + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = PhiLinearScalingRotaryEmbedding( + int(self.partial_rotary_factor * self.head_dim), + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "dynamic": + self.rotary_emb = PhiDynamicNTKScalingRotaryEmbedding( + int(self.partial_rotary_factor * self.head_dim), + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + # Copied from transformers.models.bloom.modeling_bloom.BloomAttention._split_heads + def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory + storage as `fused_qkv` + + Args: + fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim] + + Returns: + query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] + value: [batch_size, seq_length, num_heads, head_dim] + """ + batch_size, seq_length, three_times_hidden_size = fused_qkv.shape + fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) + return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + # [batch_size, seq_length, 3 x hidden_size] + fused_qkv = self.query_key_value(hidden_states) + + # 3 x [batch_size, seq_length, num_heads, head_dim] + (query_states, key_states, value_states) = self._split_heads(fused_qkv) + + # if self.qk_layernorm: + query_states = self.q_layernorm(query_states) + key_states = self.k_layernorm(key_states) + + # [batch_size, num_heads, seq_length, head_dim] -> [batch_size, seq_length, num_heads, head_dim] + query_states = query_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. 
If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + # Partial rotary embedding + query_rot, query_pass = ( + query_states[..., : self.rotary_emb.dim], + query_states[..., self.rotary_emb.dim :], + ) + key_rot, key_pass = ( + key_states[..., : self.rotary_emb.dim], + key_states[..., self.rotary_emb.dim :], + ) + # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor] + query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) + + # [batch_size, seq_length, num_heads, head_dim] + query_states = torch.cat((query_rot, query_pass), dim=-1) + key_states = torch.cat((key_rot, key_pass), dim=-1) + + if past_key_value is not None: + # Specific to RoPE models with partial rotation + cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype) + attn_weights = self.attention_dropout(attn_weights) + + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.dense(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class PhiFlashAttention2(PhiAttention): + """ + Phi flash attention module. This module inherits from `PhiAttention` as the weights of the module stays untouched. + The only required change would be on the forward pass where it needs to correctly call the public API of flash + attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
+        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Cache] = None,
+        output_attentions: bool = False,
+        use_cache: bool = False,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        # PhiFlashAttention2 attention does not support output_attentions
+        output_attentions = False
+
+        bsz, q_len, _ = hidden_states.size()
+
+        # [batch_size, seq_length, 3 x hidden_size]
+        fused_qkv = self.query_key_value(hidden_states)
+
+        # 3 x [batch_size, seq_length, num_heads, head_dim]
+        (query_states, key_states, value_states) = self._split_heads(fused_qkv)
+
+        # if self.qk_layernorm:  # applied unconditionally, mirroring the eager path (the attribute was removed above)
+        query_states = self.q_layernorm(query_states)
+        key_states = self.k_layernorm(key_states)
+
+        # [batch_size, num_heads, seq_length, head_dim] -> [batch_size, seq_length, num_heads, head_dim]
+        query_states = query_states.transpose(1, 2)
+        value_states = value_states.transpose(1, 2)
+        key_states = key_states.transpose(1, 2)
+
+        kv_seq_len = key_states.shape[-2]
+        if past_key_value is not None:
+            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+        # Partial rotary embedding
+        query_rot, query_pass = (
+            query_states[..., : self.rotary_emb.dim],
+            query_states[..., self.rotary_emb.dim :],
+        )
+        key_rot, key_pass = (
+            key_states[..., : self.rotary_emb.dim],
+            key_states[..., self.rotary_emb.dim :],
+        )
+        # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
+        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
+
+        # [batch_size, seq_length, num_heads, head_dim]
+        query_states = torch.cat((query_rot, query_pass), dim=-1)
+        key_states = torch.cat((key_rot, key_pass), dim=-1)
+
+        if past_key_value is not None:
+            cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
+            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+        tgt_len = key_states.shape[2]
+
+        # Flash attention requires the input to have the shape
+        # batch_size x seq_length x head_dim x hidden_dim
+        query_states = query_states.transpose(1, 2).view(bsz, q_len, self.num_heads, self.head_dim)
+        key_states = key_states.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
+        value_states = value_states.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
+
+        attn_dropout = self.config.attention_dropout if self.training else 0.0
+
+        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
+        # therefore the input hidden states get silently cast to float32. Hence, we need to
+        # cast them back to the correct dtype just to be sure everything works as expected.
+        # This might slow down training & inference so it is recommended to not cast the LayerNorms
+        # in fp32.
+
+        if query_states.dtype == torch.float32:
+            # Handle the case where the model is quantized
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                # This module uses a fused `query_key_value` projection (there is no `q_proj` here).
+                target_dtype = self.query_key_value.weight.dtype
+
+            logger.warning_once(
+                f"The input hidden states seem to be silently cast to float32; this might be related to"
+                f" the fact you have upcast embedding or layer norm layers in float32. We will cast back the input to"
+                f" {target_dtype}."
+ ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=attn_dropout, softmax_scale=1.0 + ) + + attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) + attn_output = self.dense(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +PHI_ATTENTION_CLASSES = { + "eager": PhiAttention, + "flash_attention_2": PhiFlashAttention2, +} + + + +class PhiDecoderLayer(nn.Module): + def __init__(self, config: PhiConfig, layer_idx: int): + super().__init__() + self.self_attn = PHI_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) + self.mlp = PhiMLP(config) + self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + self.post_layernorm = LlamaRMSNorm(config.hidden_size) + + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): + input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range + `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + attn_outputs, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + attn_outputs = self.post_layernorm(attn_outputs) + attn_outputs = self.resid_dropout(attn_outputs) + + feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states)) + hidden_states = attn_outputs + feed_forward_hidden_states + residual + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +PHI_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. 
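The `PHI_ATTENTION_CLASSES` mapping above is keyed by `config._attn_implementation`, which `from_pretrained` populates from its `attn_implementation` argument. A hedged sketch of opting into the flash path, reusing the dev checkpoint referenced in this file (flash-attn must be installed and the model placed on a supported GPU):

```python
import torch
from transformers import AutoModelForCausalLM

# "flash_attention_2" routes every decoder layer to PhiFlashAttention2;
# the default "eager" keeps the plain matmul path in PhiAttention.
model = AutoModelForCausalLM.from_pretrained(
    "susnato/phi-1_dev",
    torch_dtype=torch.float16,  # flash-attn kernels require fp16/bf16
    attn_implementation="flash_attention_2",
)
```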
+ + Parameters: + config ([`PhiConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Phi Model outputting raw hidden-states without any specific head on top.", + PHI_START_DOCSTRING, +) +class PhiPreTrainedModel(PreTrainedModel): + config_class = PhiConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +PHI_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. 
If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Phi Model outputting raw hidden-states without any specific head on top.", + PHI_START_DOCSTRING, +) +class PhiModel(PhiPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PhiDecoderLayer`] + + Args: + config: PhiConfig + """ + + def __init__(self, config: PhiConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.embed_dropout = nn.Dropout(config.embd_pdrop) + self.layers = nn.ModuleList( + [PhiDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + 
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + past_key_values_length = 0 + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + if use_cache: + use_legacy_cache = not isinstance(past_key_values, Cache) + if use_legacy_cache: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_key_values_length = past_key_values.get_usable_length(seq_length) + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + inputs_embeds = self.embed_dropout(inputs_embeds) + + # Attention mask. + if self._use_flash_attention_2: + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + i = 0 + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + if torch.isnan(hidden_states).any(): + print(f"NaN detected in output {i}") + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + i = i + 1 + + hidden_states = self.final_layernorm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class PhiForCausalLM(PhiPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi,bias=False->bias=True + def __init__(self, config): + super().__init__(config) + 
self.model = PhiModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings + def get_input_embeddings(self): + return self.model.embed_tokens + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder + def set_decoder(self, decoder): + self.model = decoder + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, PhiForCausalLM + + >>> model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev") + >>> tokenizer = AutoTokenizer.from_pretrained("susnato/phi-1_5_dev") + + >>> prompt = "This is an example script ." 
+        >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+        >>> # Generate
+        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+        'This is an example script .py file that uses the `os` module to create a new directory and write some text to it.\n\n``'
+        ```"""
+
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = outputs[0]
+        logits = self.lm_head(hidden_states)
+        logits = logits.float()
+
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            shift_logits = logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            shift_logits = shift_logits.view(-1, self.config.vocab_size)
+            shift_labels = shift_labels.view(-1)
+            # Enable model parallelism
+            shift_labels = shift_labels.to(shift_logits.device)
+            loss = loss_fct(shift_logits, shift_labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation
+    def prepare_inputs_for_generation(
+        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+    ):
+        if past_key_values is not None:
+            if isinstance(past_key_values, Cache):
+                cache_length = past_key_values.get_seq_length()
+                past_length = past_key_values.seen_tokens
+                max_cache_length = past_key_values.get_max_length()
+            else:
+                cache_length = past_length = past_key_values[0][0].shape[2]
+                max_cache_length = None
+
+            # Keep only the unprocessed tokens:
+            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+            # input)
+            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+            # input_ids based on the past_length.
+            elif past_length < input_ids.shape[1]:
+                input_ids = input_ids[:, past_length:]
+            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
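+            # (Editor's illustration, not original code: with max_cache_length == 2048,
+            # cache_length == 2040 and 16 newly arrived tokens, the check below trims
+            # attention_mask to its last 2048 columns so the mask stays aligned with the cache.)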
+ if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The PhiModel with a sequence classification head on top (linear layer). + + [`PhiForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + PHI_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->PHI,Llama->Phi with self.transformer->self.model, transformer_outputs->model_outputs +class PhiForSequenceClassification(PhiPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = PhiModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = model_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( + logits.device + ) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + model_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=model_outputs.past_key_values, + hidden_states=model_outputs.hidden_states, + attentions=model_outputs.attentions, + ) + + +@add_start_docstrings( + """ + PhiModel with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+    """,
+    PHI_START_DOCSTRING,
+)
+# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with MPT->PHI,Mpt->Phi,self.transformer->self.model,transformer_outputs->model_outputs
+class PhiForTokenClassification(PhiPreTrainedModel):
+    def __init__(self, config: PhiConfig):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+
+        self.model = PhiModel(config)
+        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
+            classifier_dropout = config.classifier_dropout
+        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
+            classifier_dropout = config.hidden_dropout
+        else:
+            classifier_dropout = 0.1
+        self.dropout = nn.Dropout(classifier_dropout)
+        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=TokenClassifierOutput,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        labels: Optional[torch.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        **deprecated_arguments,
+    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the token classification loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`.
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        model_outputs = self.model(
+            input_ids,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = model_outputs[0]
+        hidden_states = self.dropout(hidden_states)
+        logits = self.classifier(hidden_states)
+
+        loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(logits.device)
+            batch_size, seq_length = labels.shape
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(
+                logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
+            )
+
+        if not return_dict:
+            output = (logits,) + model_outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TokenClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=model_outputs.hidden_states,
+            attentions=model_outputs.attentions,
+        )
diff --git a/modified/optimization.py b/modified/optimization.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3861b371a2393351729eadd53e550c51fe8bad1
--- /dev/null
+++ b/modified/optimization.py
@@ -0,0 +1,796 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch optimization for BERT model.""" + +import math +import warnings +from functools import partial +from typing import Callable, Iterable, Optional, Tuple, Union + +import torch +from torch import nn +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau + +from .trainer_utils import SchedulerType +from .utils import logging +from .utils.versions import require_version + + +logger = logging.get_logger(__name__) + + +def _get_constant_lambda(_=None): + return 1 + + +def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch) + + +def get_reduce_on_plateau_schedule(optimizer: Optimizer, **kwargs): + """ + Create a schedule with a constant learning rate that decreases when a metric has stopped improving. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + kwargs (`dict`, *optional*): + Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau` + for possible parameters. + + Return: + `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule. + """ + + return ReduceLROnPlateau(optimizer, **kwargs) + + +def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) + return 1.0 + + +def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate + increases linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
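+
+    Example (editor's sketch, not part of the original docstring; uses a toy one-parameter optimizer):
+
+    ```python
+    import torch
+
+    param = torch.nn.Parameter(torch.zeros(1))
+    optimizer = torch.optim.SGD([param], lr=0.1)
+    scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=10)
+    # lr ramps linearly from 0 to 0.1 over the first 10 `scheduler.step()` calls, then stays constant
+    ```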
+ """ + + lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps) + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + + +def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) + + +def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): + """ + Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after + a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + lr_lambda = partial( + _get_linear_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_cosine_schedule_with_warmup_lr_lambda( + current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) + + +def get_cosine_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`float`, *optional*, defaults to 0.5): + The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 + following a half-cosine). + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
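+
+    Example (editor's sketch, not part of the original docstring):
+
+    ```python
+    import torch
+
+    param = torch.nn.Parameter(torch.zeros(1))
+    optimizer = torch.optim.SGD([param], lr=0.001)
+    scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)
+    # linear warmup to 1e-3 over 100 steps, then a half-cosine decay towards 0 at step 1000
+    ```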
+ """ + + lr_lambda = partial( + _get_cosine_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda( + current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + if progress >= 1.0: + return 0.0 + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) + + +def get_cosine_with_hard_restarts_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases + linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`int`, *optional*, defaults to 1): + The number of hard restarts to use. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + lr_lambda = partial( + _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_polynomial_decay_schedule_with_warmup_lr_lambda( + current_step: int, + *, + num_warmup_steps: int, + num_training_steps: int, + lr_end: float, + power: float, + lr_init: int, +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + elif current_step > num_training_steps: + return lr_end / lr_init # as LambdaLR multiplies by lr_init + else: + lr_range = lr_init - lr_end + decay_steps = num_training_steps - num_warmup_steps + pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps + decay = lr_range * pct_remaining**power + lr_end + return decay / lr_init # as LambdaLR multiplies by lr_init + + +def get_polynomial_decay_schedule_with_warmup( + optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 +): + """ + Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the + optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + lr_end (`float`, *optional*, defaults to 1e-7): + The end LR. + power (`float`, *optional*, defaults to 1.0): + Power factor. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. 
+
+    Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
+    implementation at
+    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+
+    """
+
+    lr_init = optimizer.defaults["lr"]
+    if not (lr_init > lr_end):
+        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
+
+    lr_lambda = partial(
+        _get_polynomial_decay_schedule_with_warmup_lr_lambda,
+        num_warmup_steps=num_warmup_steps,
+        num_training_steps=num_training_steps,
+        lr_end=lr_end,
+        power=power,
+        lr_init=lr_init,
+    )
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: int = None):
+    if current_step < num_warmup_steps:
+        return float(current_step) / float(max(1, num_warmup_steps))
+    shift = timescale - num_warmup_steps
+    decay = 1.0 / math.sqrt((current_step + shift) / timescale)
+    return decay
+
+
+def get_inverse_sqrt_schedule(
+    optimizer: Optimizer, num_warmup_steps: int, timescale: int = None, last_epoch: int = -1
+):
+    """
+    Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a
+    warmup period which increases lr linearly from 0 to the initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        timescale (`int`, *optional*, defaults to `num_warmup_steps`):
+            Time scale.
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+    # Note: this implementation is adapted from
+    #   https://github.com/google-research/big_vision/blob/f071ce68852d56099437004fd70057597a95f6ef/big_vision/utils.py#L930
+
+    if timescale is None:
+        timescale = num_warmup_steps
+
+    lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale)
+    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
+
+
+TYPE_TO_SCHEDULER_FUNCTION = {
+    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
+    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
+    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
+    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
+    SchedulerType.CONSTANT: get_constant_schedule,
+    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
+    SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule,
+    SchedulerType.REDUCE_ON_PLATEAU: get_reduce_on_plateau_schedule,
+}
+
+
+def get_scheduler(
+    name: Union[str, SchedulerType],
+    optimizer: Optimizer,
+    num_warmup_steps: Optional[int] = None,
+    num_training_steps: Optional[int] = None,
+    scheduler_specific_kwargs: Optional[dict] = None,
+):
+    """
+    Unified API to get any scheduler from its name.
+
+    Args:
+        name (`str` or `SchedulerType`):
+            The name of the scheduler to use.
+        optimizer (`torch.optim.Optimizer`):
+            The optimizer that will be used during training.
+        num_warmup_steps (`int`, *optional*):
+            The number of warmup steps to do. This is not required by all schedulers (hence the argument being
+            optional), the function will raise an error if it's unset and the scheduler type requires it.
+        num_training_steps (`int`, *optional*):
+            The number of training steps to do. This is not required by all schedulers (hence the argument being
+            optional), the function will raise an error if it's unset and the scheduler type requires it.
+        scheduler_specific_kwargs (`dict`, *optional*):
+            Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler
+            parameters will cause the scheduler function to raise a TypeError.
+    """
+    name = SchedulerType(name)
+    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
+    if name == SchedulerType.CONSTANT:
+        return schedule_func(optimizer)
+
+    if scheduler_specific_kwargs is None:
+        scheduler_specific_kwargs = {}
+
+    if name == SchedulerType.REDUCE_ON_PLATEAU:
+        return schedule_func(optimizer, **scheduler_specific_kwargs)
+
+    # All other schedulers require `num_warmup_steps`
+    if num_warmup_steps is None:
+        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
+
+    if name == SchedulerType.CONSTANT_WITH_WARMUP:
+        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
+
+    if name == SchedulerType.INVERSE_SQRT:
+        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
+
+    # All other schedulers require `num_training_steps`
+    if num_training_steps is None:
+        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
+
+    return schedule_func(
+        optimizer,
+        num_warmup_steps=num_warmup_steps,
+        num_training_steps=num_training_steps,
+        **scheduler_specific_kwargs,
+    )
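+
+# Editor's note: a minimal sketch of how `get_scheduler` ties the pieces above together
+# (illustration only, not part of the original module; assumes a toy single-parameter model):
+#
+#     import torch
+#
+#     param = torch.nn.Parameter(torch.zeros(1))
+#     optimizer = torch.optim.SGD([param], lr=0.1)
+#     scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
+#     for _ in range(100):
+#         optimizer.step()
+#         scheduler.step()  # lr: 0 -> 0.1 over 10 steps, then linearly back towards 0 by step 100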
+
+
+class AdamW(Optimizer):
+    """
+    Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay
+    Regularization](https://arxiv.org/abs/1711.05101).
+
+    Parameters:
+        params (`Iterable[nn.parameter.Parameter]`):
+            Iterable of parameters to optimize or dictionaries defining parameter groups.
+        lr (`float`, *optional*, defaults to 0.001):
+            The learning rate to use.
+        betas (`Tuple[float,float]`, *optional*, defaults to `(0.9, 0.999)`):
+            Adam's betas parameters (b1, b2).
+        eps (`float`, *optional*, defaults to 1e-06):
+            Adam's epsilon for numerical stability.
+        weight_decay (`float`, *optional*, defaults to 0.0):
+            Decoupled weight decay to apply.
+        correct_bias (`bool`, *optional*, defaults to `True`):
+            Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`).
+        no_deprecation_warning (`bool`, *optional*, defaults to `False`):
+            A flag used to disable the deprecation warning (set to `True` to disable the warning).
+    """
+
+    def __init__(
+        self,
+        params: Iterable[nn.parameter.Parameter],
+        lr: float = 1e-3,
+        betas: Tuple[float, float] = (0.9, 0.999),
+        eps: float = 1e-6,
+        weight_decay: float = 0.0,
+        correct_bias: bool = True,
+        no_deprecation_warning: bool = False,
+    ):
+        if not no_deprecation_warning:
+            warnings.warn(
+                "This implementation of AdamW is deprecated and will be removed in a future version. 
Use the PyTorch" + " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this" + " warning", + FutureWarning, + ) + require_version("torch>=1.5.0") # add_ with alpha + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0") + defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias} + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure: Callable = None): + """ + Performs a single optimization step. + + Arguments: + closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + beta1, beta2 = group["betas"] + + state["step"] += 1 + + # Decay the first and second moment running average coefficient + # In-place operations to update the averages at the same time + exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1)) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) + denom = exp_avg_sq.sqrt().add_(group["eps"]) + + step_size = group["lr"] + if group["correct_bias"]: # No bias correction for Bert + bias_correction1 = 1.0 - beta1 ** state["step"] + bias_correction2 = 1.0 - beta2 ** state["step"] + step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. + # Add weight decay at the end (fixed version) + if group["weight_decay"] > 0.0: + p.add_(p, alpha=(-group["lr"] * group["weight_decay"])) + + return loss + + +class Adafactor(Optimizer): + """ + AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: + https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + + Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that + this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and + `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. 
+ + Arguments: + params (`Iterable[nn.parameter.Parameter]`): + Iterable of parameters to optimize or dictionaries defining parameter groups. + lr (`float`, *optional*): + The external learning rate. + eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`): + Regularization constants for square gradient and parameter scale respectively + clip_threshold (`float`, *optional*, defaults to 1.0): + Threshold of root mean square of final gradient update + decay_rate (`float`, *optional*, defaults to -0.8): + Coefficient used to compute running averages of square + beta1 (`float`, *optional*): + Coefficient used for computing running averages of gradient + weight_decay (`float`, *optional*, defaults to 0.0): + Weight decay (L2 penalty) + scale_parameter (`bool`, *optional*, defaults to `True`): + If True, learning rate is scaled by root mean square + relative_step (`bool`, *optional*, defaults to `True`): + If True, time-dependent learning rate is computed instead of external learning rate + warmup_init (`bool`, *optional*, defaults to `False`): + Time-dependent learning rate computation depends on whether warm-up initialization is being used + + This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. + + Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3): + + - Training without LR warmup or clip_threshold is not recommended. + + - use scheduled LR warm-up to fixed LR + - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235) + - Disable relative updates + - Use scale_parameter=False + - Additional optimizer operations like gradient clipping should not be used alongside Adafactor + + Example: + + ```python + Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3) + ``` + + Others reported the following combination to work well: + + ```python + Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) + ``` + + When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`] + scheduler as following: + + ```python + from transformers.optimization import Adafactor, AdafactorSchedule + + optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) + lr_scheduler = AdafactorSchedule(optimizer) + trainer = Trainer(..., optimizers=(optimizer, lr_scheduler)) + ``` + + Usage: + + ```python + # replace AdamW with Adafactor + optimizer = Adafactor( + model.parameters(), + lr=1e-3, + eps=(1e-30, 1e-3), + clip_threshold=1.0, + decay_rate=-0.8, + beta1=None, + weight_decay=0.0, + relative_step=False, + scale_parameter=False, + warmup_init=False, + ) + ```""" + + def __init__( + self, + params, + lr=None, + eps=(1e-30, 1e-3), + clip_threshold=1.0, + decay_rate=-0.8, + beta1=None, + weight_decay=0.0, + scale_parameter=True, + relative_step=True, + warmup_init=False, + ): + require_version("torch>=1.5.0") # add_ with alpha + if lr is not None and relative_step: + raise ValueError("Cannot combine manual `lr` and `relative_step=True` options") + if warmup_init and not relative_step: + raise ValueError("`warmup_init=True` requires `relative_step=True`") + + defaults = { + "lr": lr, + "eps": eps, + "clip_threshold": clip_threshold, + "decay_rate": decay_rate, + "beta1": beta1, + "weight_decay": weight_decay, + "scale_parameter": scale_parameter, + "relative_step": relative_step, + "warmup_init": warmup_init, + } + 
super().__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + rel_step_sz = param_group["lr"] + if param_group["relative_step"]: + min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 + rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) + param_scale = 1.0 + if param_group["scale_parameter"]: + param_scale = max(param_group["eps"][1], param_state["RMS"]) + return param_scale * rel_step_sz + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group["beta1"] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + @staticmethod + def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): + # copy from fairseq's adafactor implementation: + # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505 + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError("Adafactor does not support sparse gradients.") + + state = self.state[p] + grad_shape = grad.shape + + factored, use_first_moment = self._get_options(group, grad_shape) + # State Initialization + if len(state) == 0: + state["step"] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(grad) + if factored: + state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) + state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) + else: + state["exp_avg_sq"] = torch.zeros_like(grad) + + state["RMS"] = 0 + else: + if use_first_moment: + state["exp_avg"] = state["exp_avg"].to(grad) + if factored: + state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) + state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) + else: + state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) + + p_data_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_data_fp32 = p_data_fp32.float() + + state["step"] += 1 + state["RMS"] = self._rms(p_data_fp32) + lr = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) + update = (grad**2) + group["eps"][0] + if factored: + exp_avg_sq_row = state["exp_avg_sq_row"] + exp_avg_sq_col = state["exp_avg_sq_col"] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state["exp_avg_sq"] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) + update.mul_(lr) + + if use_first_moment: + exp_avg 
= state["exp_avg"] + exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"])) + update = exp_avg + + if group["weight_decay"] != 0: + p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr)) + + p_data_fp32.add_(-update) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_data_fp32) + + return loss + + +class AdafactorSchedule(LambdaLR): + """ + Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g., + for logging), this class creates a proxy object that retrieves the current lr values from the optimizer. + + It returns `initial_lr` during startup and the actual `lr` during stepping. + """ + + def __init__(self, optimizer, initial_lr=0.0): + def lr_lambda(_): + return initial_lr + + for group in optimizer.param_groups: + group["initial_lr"] = initial_lr + super().__init__(optimizer, lr_lambda) + for group in optimizer.param_groups: + del group["initial_lr"] + + def get_lr(self): + opt = self.optimizer + lrs = [ + opt._get_lr(group, opt.state[group["params"][0]]) + for group in opt.param_groups + if group["params"][0].grad is not None + ] + if len(lrs) == 0: + lrs = self.base_lrs # if called before stepping + return lrs + + +def get_adafactor_schedule(optimizer, initial_lr=0.0): + """ + Get a proxy schedule for [`~optimization.Adafactor`] + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + initial_lr (`float`, *optional*, defaults to 0.0): + Initial lr + + Return: + [`~optimization.Adafactor`] proxy schedule object. + + + """ + return AdafactorSchedule(optimizer, initial_lr) diff --git a/modified/pytorch_utils.py b/modified/pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1a464b62a6651383ef0965ef908f9e0b842d4d65 --- /dev/null +++ b/modified/pytorch_utils.py @@ -0,0 +1,301 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import inspect +from typing import Callable, List, Optional, Set, Tuple, Union + +import torch +from packaging import version +from safetensors.torch import storage_ptr, storage_size +from torch import nn + +from .utils import is_torch_tpu_available, logging + + +ALL_LAYERNORM_LAYERS = [nn.LayerNorm] + +logger = logging.get_logger(__name__) + +parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) + +is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1") +is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0") +is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13") +is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") +is_torch_greater_or_equal_than_1_11 = parsed_torch_version_base >= version.parse("1.11") +is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") +is_torch_1_8_0 = parsed_torch_version_base == version.parse("1.8.0") + + +def softmax_backward_data(parent, grad_output, output, dim, self): + """ + A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according + to the torch version detected. + """ + + from torch import _softmax_backward_data + + if is_torch_less_than_1_11: + return _softmax_backward_data(grad_output, output, parent.dim, self) + else: + return _softmax_backward_data(grad_output, output, parent.dim, self.dtype) + + +def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear: + """ + Prune a linear layer to keep only entries in index. + + Used to remove heads. + + Args: + layer (`torch.nn.Linear`): The layer to prune. + index (`torch.LongTensor`): The indices to keep in the layer. + dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices. + + Returns: + `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`. + """ + index = index.to(layer.weight.device) + W = layer.weight.index_select(dim, index).clone().detach() + if layer.bias is not None: + if dim == 1: + b = layer.bias.clone().detach() + else: + b = layer.bias[index].clone().detach() + new_size = list(layer.weight.size()) + new_size[dim] = len(index) + new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) + new_layer.weight.requires_grad = False + new_layer.weight.copy_(W.contiguous()) + new_layer.weight.requires_grad = True + if layer.bias is not None: + new_layer.bias.requires_grad = False + new_layer.bias.copy_(b.contiguous()) + new_layer.bias.requires_grad = True + return new_layer + + +class Conv1D(nn.Module): + """ + 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). + + Basically works like a linear layer but the weights are transposed. + + Args: + nf (`int`): The number of output features. + nx (`int`): The number of input features. + """ + + def __init__(self, nf, nx): + super().__init__() + self.nf = nf + self.weight = nn.Parameter(torch.empty(nx, nf)) + self.bias = nn.Parameter(torch.zeros(nf)) + nn.init.normal_(self.weight, std=0.02) + + def forward(self, x): + size_out = x.size()[:-1] + (self.nf,) + x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) + x = x.view(size_out) + return x + + +def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: + """ + Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. 
BERT) but the weights
+    are transposed.
+
+    Used to remove heads.
+
+    Args:
+        layer ([`~pytorch_utils.Conv1D`]): The layer to prune.
+        index (`torch.LongTensor`): The indices to keep in the layer.
+        dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.
+
+    Returns:
+        [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
+    """
+    index = index.to(layer.weight.device)
+    W = layer.weight.index_select(dim, index).clone().detach()
+    if dim == 0:
+        b = layer.bias.clone().detach()
+    else:
+        b = layer.bias[index].clone().detach()
+    new_size = list(layer.weight.size())
+    new_size[dim] = len(index)
+    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
+    new_layer.weight.requires_grad = False
+    new_layer.weight.copy_(W.contiguous())
+    new_layer.weight.requires_grad = True
+    new_layer.bias.requires_grad = False
+    new_layer.bias.copy_(b.contiguous())
+    new_layer.bias.requires_grad = True
+    return new_layer
+
+
+def prune_layer(
+    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
+) -> Union[nn.Linear, Conv1D]:
+    """
+    Prune a Conv1D or linear layer to keep only entries in index.
+
+    Used to remove heads.
+
+    Args:
+        layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
+        index (`torch.LongTensor`): The indices to keep in the layer.
+        dim (`int`, *optional*): The dimension on which to keep the indices.
+
+    Returns:
+        `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
+    """
+    if isinstance(layer, nn.Linear):
+        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
+    elif isinstance(layer, Conv1D):
+        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
+    else:
+        raise ValueError(f"Can't prune layer of class {layer.__class__}")
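+
+# Editor's illustration (not part of the original module): pruning keeps only the
+# selected rows/columns, e.g. dropping one of four attention heads of size 64:
+#
+#     import torch
+#     from torch import nn
+#
+#     layer = nn.Linear(256, 256)
+#     keep = torch.arange(192)  # keep heads 0-2, drop head 3
+#     pruned = prune_layer(layer, keep, dim=0)  # out_features: 256 -> 192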
+
+
+def apply_chunking_to_forward(
+    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
+) -> torch.Tensor:
+    """
+    This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
+    `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.
+
+    If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
+    applying `forward_fn` to `input_tensors`.
+
+    Args:
+        forward_fn (`Callable[..., torch.Tensor]`):
+            The forward function of the model.
+        chunk_size (`int`):
+            The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
+        chunk_dim (`int`):
+            The dimension over which the `input_tensors` should be chunked.
+        input_tensors (`Tuple[torch.Tensor]`):
+            The input tensors of `forward_fn` which will be chunked
+
+    Returns:
+        `torch.Tensor`: A tensor with the same shape as the one `forward_fn` would have given if applied directly.
+
+
+    Examples:
+
+    ```python
+    # rename the usual forward() fn to forward_chunk()
+    def forward_chunk(self, hidden_states):
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+    # implement a chunked forward function
+    def forward(self, hidden_states):
+        return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
+    ```"""
+
+    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
+
+    # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
+    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
+    if num_args_in_forward_chunk_fn != len(input_tensors):
+        raise ValueError(
+            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
+            "tensors are given"
+        )
+
+    if chunk_size > 0:
+        tensor_shape = input_tensors[0].shape[chunk_dim]
+        for input_tensor in input_tensors:
+            if input_tensor.shape[chunk_dim] != tensor_shape:
+                raise ValueError(
+                    f"All input tensors have to be of the same shape: {tensor_shape}, "
+                    f"found shape {input_tensor.shape[chunk_dim]}"
+                )
+
+        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
+            raise ValueError(
+                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
+                f"size {chunk_size}"
+            )
+
+        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
+
+        # chunk input tensor into tuples
+        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
+        # apply forward fn to every tuple
+        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
+        # concatenate output at same dimension
+        return torch.cat(output_chunks, dim=chunk_dim)
+
+    return forward_fn(*input_tensors)
+
+
+def find_pruneable_heads_and_indices(
+    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
+) -> Tuple[Set[int], torch.LongTensor]:
+    """
+    Finds the heads and their indices taking `already_pruned_heads` into account.
+
+    Args:
+        heads (`List[int]`): List of the indices of heads to prune.
+        n_heads (`int`): The number of heads in the model.
+        head_size (`int`): The size of each head.
+        already_pruned_heads (`Set[int]`): A set of already pruned heads.
+
+    Returns:
+        `Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads`
+        into account and the indices of rows/columns to keep in the layer weight.
+    """
+    mask = torch.ones(n_heads, head_size)
+    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
+    for head in heads:
+        # Compute how many pruned heads are before the head and move the index accordingly
+        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
+        mask[head] = 0
+    mask = mask.view(-1).contiguous().eq(1)
+    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
+    return heads, index
+
+
+def meshgrid(
+    *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None
+) -> Tuple[torch.Tensor, ...]:
+    """
+    Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument.
+
+    Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html
+    """
+    return torch.meshgrid(*tensors, indexing=indexing)
+
+
+def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
+    """
+    Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
+    """
+    mask = torch.ones(n_heads, head_size)
+    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
+    for head in heads:
+        # Compute how many pruned heads are before the head and move the index accordingly
+        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
+        mask[head] = 0
+    mask = mask.view(-1).contiguous().eq(1)
+    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
+    return heads, index
+
+
+def meshgrid(
+    *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None
+) -> Tuple[torch.Tensor, ...]:
+    """
+    Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument.
+
+    Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html
+    """
+    return torch.meshgrid(*tensors, indexing=indexing)
+
+
+def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
+    """
+    Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
+    example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
+    guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
+    non-overlapping lifetimes may have the same id.
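+
+    Example (an illustrative sketch; the exact ids vary from run to run):
+
+    ```python
+    t = torch.ones(4)
+    id_tensor_storage(t) == id_tensor_storage(t[:2])  # True: views share one storage
+    id_tensor_storage(t) == id_tensor_storage(torch.ones(4))  # False while both tensors are alive
+    ```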
Please try again.") + elif resp["msg"] == "send_data": + event_id = resp["event_id"] + response = requests.post( + sse_data_url, + stream=True, + params=hash_data, + json={"event_id": event_id, **payload, **hash_data}, + ) + response.raise_for_status() + elif resp["msg"] == "process_completed": + return + + with requests.get(sse_url, stream=True, params=hash_data) as sse_connection: + data = {"data": [model_id, private, token]} + try: + logger.debug("Spawning safetensors automatic conversion.") + start(sse_connection, data) + except Exception as e: + logger.warning(f"Error during conversion: {repr(e)}") + + +def get_conversion_pr_reference(api: HfApi, model_id: str, **kwargs): + private = api.model_info(model_id).private + + logger.info("Attempting to create safetensors variant") + pr_title = "Adding `safetensors` variant of this model" + token = kwargs.get("token") + + # This looks into the current repo's open PRs to see if a PR for safetensors was already open. If so, it + # returns it. It checks that the PR was opened by the bot and not by another user so as to prevent + # security breaches. + pr = previous_pr(api, model_id, pr_title, token=token) + + if pr is None or (not private and pr.author != "SFConvertBot"): + spawn_conversion(token, private, model_id) + pr = previous_pr(api, model_id, pr_title, token=token) + else: + logger.info("Safetensors PR exists") + + sha = f"refs/pr/{pr.num}" + + return sha + + +def auto_conversion(pretrained_model_name_or_path: str, **cached_file_kwargs): + api = HfApi(token=cached_file_kwargs.get("token")) + sha = get_conversion_pr_reference(api, pretrained_model_name_or_path, **cached_file_kwargs) + + if sha is None: + return None, None + cached_file_kwargs["revision"] = sha + del cached_file_kwargs["_commit_hash"] + + # This is an additional HEAD call that could be removed if we could infer sharded/non-sharded from the PR + # description. + sharded = api.file_exists( + pretrained_model_name_or_path, + "model.safetensors.index.json", + revision=sha, + token=cached_file_kwargs.get("token"), + ) + filename = "model.safetensors.index.json" if sharded else "model.safetensors" + + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) + return resolved_archive_file, sha, sharded diff --git a/modified/trainer_utils.py b/modified/trainer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd868d112024699c5e875ae1f5579a8626f6c94 --- /dev/null +++ b/modified/trainer_utils.py @@ -0,0 +1,772 @@ +# coding=utf-8 +# Copyright 2020-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow. 
+""" + +import copy +import functools +import gc +import inspect +import os +import random +import re +import threading +import time +from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union + +import numpy as np + +from .utils import ( + ExplicitEnum, + is_psutil_available, + is_tf_available, + is_torch_available, + is_torch_cuda_available, + is_torch_mps_available, + is_torch_npu_available, + is_torch_tpu_available, + is_torch_xpu_available, + requires_backends, +) + + +if is_torch_available(): + import torch + + +def seed_worker(_): + """ + Helper function to set worker seed during Dataloader initialization. + """ + worker_seed = torch.initial_seed() % 2**32 + set_seed(worker_seed) + + +def enable_full_determinism(seed: int, warn_only: bool = False): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow + """ + # set seed first + set_seed(seed) + + if is_torch_available(): + # Enable PyTorch deterministic mode. This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True, warn_only=warn_only) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + if is_tf_available(): + import tensorflow as tf + + tf.config.experimental.enable_op_determinism() + + +def set_seed(seed: int): + """ + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). + + Args: + seed (`int`): The seed to set. + """ + random.seed(seed) + np.random.seed(seed) + if is_torch_available(): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + if is_torch_npu_available(): + torch.npu.manual_seed_all(seed) + if is_torch_xpu_available(): + torch.xpu.manual_seed_all(seed) + if is_tf_available(): + import tensorflow as tf + + tf.random.set_seed(seed) + + +def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding + layers. This method is slightly adapted from the original source code that can be found here: + https://github.com/neelsjain/NEFTune Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to + the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). + """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + + +class EvalPrediction: + """ + Evaluation output (always contains labels), to be used to compute metrics. 
+ + Parameters: + predictions (`np.ndarray`): Predictions of the model. + label_ids (`np.ndarray`): Targets to be matched. + inputs (`np.ndarray`, *optional*): + """ + + def __init__( + self, + predictions: Union[np.ndarray, Tuple[np.ndarray]], + label_ids: Union[np.ndarray, Tuple[np.ndarray]], + inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None, + ): + self.predictions = predictions + self.label_ids = label_ids + self.inputs = inputs + + def __iter__(self): + if self.inputs is not None: + return iter((self.predictions, self.label_ids, self.inputs)) + else: + return iter((self.predictions, self.label_ids)) + + def __getitem__(self, idx): + if idx < 0 or idx > 2: + raise IndexError("tuple index out of range") + if idx == 2 and self.inputs is None: + raise IndexError("tuple index out of range") + if idx == 0: + return self.predictions + elif idx == 1: + return self.label_ids + elif idx == 2: + return self.inputs + + +class EvalLoopOutput(NamedTuple): + predictions: Union[np.ndarray, Tuple[np.ndarray]] + label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] + metrics: Optional[Dict[str, float]] + num_samples: Optional[int] + + +class PredictionOutput(NamedTuple): + predictions: Union[np.ndarray, Tuple[np.ndarray]] + label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] + metrics: Optional[Dict[str, float]] + + +class TrainOutput(NamedTuple): + global_step: int + training_loss: float + metrics: Dict[str, float] + + +PREFIX_CHECKPOINT_DIR = "checkpoint" +_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") + + +def get_last_checkpoint(folder): + content = os.listdir(folder) + checkpoints = [ + path + for path in content + if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) + ] + if len(checkpoints) == 0: + return + return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) + + +class IntervalStrategy(ExplicitEnum): + NO = "no" + STEPS = "steps" + EPOCH = "epoch" + + +class EvaluationStrategy(ExplicitEnum): + NO = "no" + STEPS = "steps" + EPOCH = "epoch" + + +class HubStrategy(ExplicitEnum): + END = "end" + EVERY_SAVE = "every_save" + CHECKPOINT = "checkpoint" + ALL_CHECKPOINTS = "all_checkpoints" + + +class BestRun(NamedTuple): + """ + The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]). + + Parameters: + run_id (`str`): + The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending + with run-{run_id}). + objective (`float`): + The objective that was obtained for this run. + hyperparameters (`Dict[str, Any]`): + The hyperparameters picked to get this run. + run_summary (`Optional[Any]`): + A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend. + """ + + run_id: str + objective: Union[float, List[float]] + hyperparameters: Dict[str, Any] + run_summary: Optional[Any] = None + + +def default_compute_objective(metrics: Dict[str, float]) -> float: + """ + The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no + metrics are provided to the [`Trainer`], the sum of all metrics otherwise. + + Args: + metrics (`Dict[str, float]`): The metrics returned by the evaluate method. 
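+
+    Example (a minimal sketch):
+
+    ```python
+    default_compute_objective({"eval_loss": 0.25, "epoch": 2.0})
+    # -> 0.25 (only the loss is left, so it is returned)
+    default_compute_objective({"eval_loss": 0.25, "eval_accuracy": 0.8, "eval_runtime": 11.2})
+    # -> 0.8 (loss and speed metrics are removed; the remaining metrics are summed)
+    ```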
+
+    Return:
+        `float`: The objective to minimize or maximize.
+    """
+    metrics = copy.deepcopy(metrics)
+    loss = metrics.pop("eval_loss", None)
+    _ = metrics.pop("epoch", None)
+    # Remove speed metrics
+    speed_metrics = [
+        m
+        for m in metrics.keys()
+        if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time")
+    ]
+    for sm in speed_metrics:
+        _ = metrics.pop(sm, None)
+    return loss if len(metrics) == 0 else sum(metrics.values())
+
+
+def default_hp_space_optuna(trial) -> Dict[str, float]:
+    from .integrations import is_optuna_available
+
+    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
+    return {
+        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
+        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
+        "seed": trial.suggest_int("seed", 1, 40),
+        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
+    }
+
+
+def default_hp_space_ray(trial) -> Dict[str, float]:
+    from .integrations import is_ray_tune_available
+
+    assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`"
+    from ray import tune
+
+    return {
+        "learning_rate": tune.loguniform(1e-6, 1e-4),
+        "num_train_epochs": tune.choice(list(range(1, 6))),
+        "seed": tune.uniform(1, 40),
+        "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
+    }
+
+
+def default_hp_space_sigopt(trial):
+    return [
+        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"},
+        {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
+        {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
+        {
+            "categorical_values": ["4", "8", "16", "32", "64"],
+            "name": "per_device_train_batch_size",
+            "type": "categorical",
+        },
+    ]
+
+
+def default_hp_space_wandb(trial) -> Dict[str, float]:
+    from .integrations import is_wandb_available
+
+    if not is_wandb_available():
+        raise ImportError("This function needs wandb installed: `pip install wandb`")
+
+    return {
+        "method": "random",
+        "metric": {"name": "objective", "goal": "minimize"},
+        "parameters": {
+            "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
+            "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
+            "seed": {"distribution": "int_uniform", "min": 1, "max": 40},
+            "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
+        },
+    }
+
+
+class HPSearchBackend(ExplicitEnum):
+    OPTUNA = "optuna"
+    RAY = "ray"
+    SIGOPT = "sigopt"
+    WANDB = "wandb"
+
+
+def is_main_process(local_rank):
+    """
+    Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
+    `local_rank`.
+    """
+    if is_torch_tpu_available(check_device=True):
+        import torch_xla.core.xla_model as xm
+
+        return xm.get_ordinal() == 0
+    return local_rank in [-1, 0]
+
+
+def total_processes_number(local_rank):
+    """
+    Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
+    """
+    if is_torch_tpu_available(check_device=True):
+        import torch_xla.core.xla_model as xm
+
+        return xm.xrt_world_size()
+    elif local_rank != -1 and is_torch_available():
+        import torch
+
+        return torch.distributed.get_world_size()
+    return 1
+
+
+def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):
+    """
+    Measure and return speed performance metrics.
+
+    This function requires a time snapshot `start_time` before the operation to be measured starts and this function
+    should be run immediately after the operation to be measured has completed.
+
+    Args:
+        - split: name to prefix metric (like train, eval, test...)
+        - start_time: operation start time
+        - num_samples: number of samples processed
+        - num_steps: number of steps processed
+        - num_tokens: number of tokens processed
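+
+    Example (a minimal sketch of the intended call pattern; the numbers are made up):
+
+    ```python
+    start_time = time.time()
+    # ... run the operation being measured ...
+    metrics = speed_metrics("train", start_time, num_samples=800, num_steps=100)
+    # -> {"train_runtime": ..., "train_samples_per_second": ..., "train_steps_per_second": ...}
+    ```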
+    """
+    runtime = time.time() - start_time
+    result = {f"{split}_runtime": round(runtime, 4)}
+    if runtime == 0:
+        return result
+    if num_samples is not None:
+        samples_per_second = num_samples / runtime
+        result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
+    if num_steps is not None:
+        steps_per_second = num_steps / runtime
+        result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
+    if num_tokens is not None:
+        tokens_per_second = num_tokens / runtime
+        result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3)
+    return result
+
+
+class SchedulerType(ExplicitEnum):
+    LINEAR = "linear"
+    COSINE = "cosine"
+    COSINE_WITH_RESTARTS = "cosine_with_restarts"
+    POLYNOMIAL = "polynomial"
+    CONSTANT = "constant"
+    CONSTANT_WITH_WARMUP = "constant_with_warmup"
+    INVERSE_SQRT = "inverse_sqrt"
+    REDUCE_ON_PLATEAU = "reduce_lr_on_plateau"
+
+
+class TrainerMemoryTracker:
+    """
+    A helper class that tracks CPU and GPU memory.
+
+    This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
+
+    When a stage completes, it can pass a metrics dict to update with the memory metrics gathered during this stage.
+
+    Example:
+
+    ```python
+    self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
+    self._memory_tracker.start()
+    # code ...
+    metrics = {"train_runtime": 10.5}
+    self._memory_tracker.stop_and_update_metrics(metrics)
+    ```
+
+    At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
+
+    To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
+ """ + + # map trainer methods to metrics prefix + stages = { + "__init__": "init", + "train": "train", + "_inner_training_loop": "train", + "evaluate": "eval", + "predict": "test", + } + + def __init__(self, skip_memory_metrics=False): + self.skip_memory_metrics = skip_memory_metrics + + if not is_psutil_available(): + # soft dependency on psutil + self.skip_memory_metrics = True + + if self.skip_memory_metrics: + return + + import psutil # noqa + + if is_torch_cuda_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_mps_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_xpu_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_npu_available(): + import torch + + self.torch = torch + self.gpu = {} + else: + self.torch = None + + self.process = psutil.Process() + + self.cur_stage = None + self.cpu = {} + self.init_reported = False + + def derive_stage(self): + """derives the stage/caller name automatically""" + caller = inspect.currentframe().f_back.f_back.f_code.co_name + if caller in self.stages: + return self.stages[caller] + else: + raise ValueError( + f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" + ) + + def cpu_mem_used(self): + """get resident set size memory for the current process""" + return self.process.memory_info().rss + + def peak_monitor_func(self): + self.cpu_mem_used_peak = -1 + + while True: + self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) + + # can't sleep or will not catch the peak right (this comment is here on purpose) + # time.sleep(0.001) # 1msec + + if not self.peak_monitoring: + break + + def start(self): + """start tracking for the caller's stage""" + if self.skip_memory_metrics: + return + + stage = self.derive_stage() + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + self.cur_stage = stage + + gc.collect() + + if self.torch is not None: + if torch.cuda.is_available(): + self.torch.cuda.reset_peak_memory_stats() + self.torch.cuda.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.reset_peak_memory_stats() + self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.reset_peak_memory_stats() + self.torch.npu.empty_cache() + + # gpu + if self.torch is not None: + if torch.cuda.is_available(): + self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() + + # cpu + self.cpu_mem_used_at_start = self.cpu_mem_used() + + self.peak_monitoring = True + peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) + peak_monitor_thread.daemon = True + peak_monitor_thread.start() + + def stop(self, stage): + """stop tracking for the passed stage""" + + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + # this sends a signal to peak_monitor_func to complete its loop + self.peak_monitoring = False + + # first ensure all objects get collected and their memory is freed + gc.collect() + + if self.torch is not None: + if torch.cuda.is_available(): + self.torch.cuda.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + 
self.torch.npu.empty_cache() + + # concepts: + # - alloc_delta: the difference of allocated memory between the end and the start + # - peaked_delta: the difference between the peak memory and the current memory + # in order to know how much memory the measured code consumed one needs to sum these two + + # gpu + if self.torch is not None: + if torch.cuda.is_available(): + self.gpu_mem_used_now = self.torch.cuda.memory_allocated() + self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_now = self.torch.xpu.memory_allocated() + self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_now = self.torch.npu.memory_allocated() + self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() + else: + raise ValueError("No available GPU device found!") + + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), + } + + # cpu + self.cpu_mem_used_now = self.cpu_mem_used() + self.cpu[self.cur_stage] = { + "begin": self.cpu_mem_used_at_start, + "end": self.cpu_mem_used_now, + "alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start), + "peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), + } + + # reset - cycle finished + self.cur_stage = None + + def update_metrics(self, stage, metrics): + """updates the metrics""" + if self.skip_memory_metrics: + return + + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + # since we don't have a way to return init metrics, we push them into the first of train/val/predict + stages = [stage] + if not self.init_reported: + stages.insert(0, "init") + self.init_reported = True + + for stage in stages: + for t in ["alloc", "peaked"]: + if stage in self.cpu and t in self.cpu[stage]: + metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] + if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: + metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] + # if we need additional debug info, enable the following + # for t in ["begin", "end"]: + # if stage in self.cpu and t in self.cpu[stage]: + # metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t] + # if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: + # metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t] + + # since memory can be allocated before init, and it might be difficult to track overall + # memory usage, in particular for GPU, let's report memory usage at the point init was called + if stages[0] == "init": + metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] + if self.torch is not None: + metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] + # if we also wanted to report any additional memory allocations in between init and + # whatever the next stage was we could also report this: + # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]: + # metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"] + # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]: + # metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"] + + def stop_and_update_metrics(self, metrics=None): + """combine stop and metrics update in one call for simpler code""" + if 
self.skip_memory_metrics:
+            return
+
+        stage = self.derive_stage()
+        self.stop(stage)
+
+        # init doesn't have metrics to update so we just save that data for later stages to retrieve
+        if metrics is not None:
+            self.update_metrics(stage, metrics)
+
+
+def has_length(dataset):
+    """
+    Checks if the dataset implements __len__() and it doesn't raise an error
+    """
+    try:
+        return len(dataset) is not None
+    except TypeError:
+        # TypeError: len() of unsized object
+        return False
+
+
+def denumpify_detensorize(metrics):
+    """
+    Recursively calls `.item()` on the element of the dictionary passed
+    """
+    if isinstance(metrics, (list, tuple)):
+        return type(metrics)(denumpify_detensorize(m) for m in metrics)
+    elif isinstance(metrics, dict):
+        return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
+    elif isinstance(metrics, np.generic):
+        return metrics.item()
+    elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
+        return metrics.item()
+    return metrics
+
+
+def number_of_arguments(func):
+    """
+    Return the number of arguments of the passed function, even if it's a partial function.
+    """
+    if isinstance(func, functools.partial):
+        total_args = len(inspect.signature(func.func).parameters)
+        return total_args - len(func.args) - len(func.keywords)
+    return len(inspect.signature(func).parameters)
+
+
+def find_executable_batch_size(
+    function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False
+):
+    """
+    A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
+    CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as
+    its first argument.
+
+    Args:
+        function (`callable`, *optional*)
+            A function to wrap
+        starting_batch_size (`int`, *optional*)
+            The batch size to try and fit into memory
+        auto_find_batch_size (`bool`, *optional*)
+            If False, will just execute `function`
+    """
+    if function is None:
+        return functools.partial(
+            find_executable_batch_size,
+            starting_batch_size=starting_batch_size,
+            auto_find_batch_size=auto_find_batch_size,
+        )
+
+    if auto_find_batch_size:
+        requires_backends(find_executable_batch_size, "accelerate")
+        from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size
+
+        return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size)
+
+    return functools.partial(function, batch_size=starting_batch_size)
+
+
+class FSDPOption(ExplicitEnum):
+    FULL_SHARD = "full_shard"
+    SHARD_GRAD_OP = "shard_grad_op"
+    NO_SHARD = "no_shard"
+    HYBRID_SHARD = "hybrid_shard"
+    HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2"
+    OFFLOAD = "offload"
+    AUTO_WRAP = "auto_wrap"
+
+
+class RemoveColumnsCollator:
+    """Wrap the data collator to remove unused columns before they are passed to the collator."""
+
+    def __init__(
+        self,
+        data_collator,
+        signature_columns,
+        logger=None,
+        model_name: Optional[str] = None,
+        description: Optional[str] = None,
+    ):
+        self.data_collator = data_collator
+        self.signature_columns = signature_columns
+        self.logger = logger
+        self.description = description
+        self.model_name = model_name
+        self.message_logged = False
+
+    def _remove_columns(self, feature: dict) -> dict:
+        if not isinstance(feature, dict):
+            return feature
+        if not self.message_logged and self.logger and self.model_name:
+            ignored_columns = list(set(feature.keys()) - set(self.signature_columns))
+            if 
len(ignored_columns) > 0: + dset_description = "" if self.description is None else f"in the {self.description} set" + self.logger.info( + f"The following columns {dset_description} don't have a corresponding argument in " + f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." + f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " + " you can safely ignore this message." + ) + self.message_logged = True + return {k: v for k, v in feature.items() if k in self.signature_columns} + + def __call__(self, features: List[dict]): + features = [self._remove_columns(feature) for feature in features] + return self.data_collator(features) diff --git a/modified/utils/__init__.py b/modified/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e2f9e0403987ba40b911070dbef1808c4ced5b --- /dev/null +++ b/modified/utils/__init__.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from huggingface_hub import get_full_repo_name # for backward compatibility +from packaging import version + +from .. import __version__ +from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD +from .doc import ( + add_code_sample_docstrings, + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + copy_func, + replace_return_docstrings, +) +from .generic import ( + ContextManagers, + ExplicitEnum, + ModelOutput, + PaddingStrategy, + TensorType, + add_model_info_to_auto_map, + cached_property, + can_return_loss, + expand_dims, + find_labels, + flatten_dict, + infer_framework, + is_jax_tensor, + is_numpy_array, + is_tensor, + is_tf_symbolic_tensor, + is_tf_tensor, + is_torch_device, + is_torch_dtype, + is_torch_tensor, + reshape, + squeeze, + strtobool, + tensor_size, + to_numpy, + to_py_obj, + transpose, + working_or_temp_dir, +) +from .hub import ( + CLOUDFRONT_DISTRIB_PREFIX, + DISABLE_TELEMETRY, + HF_MODULES_CACHE, + HUGGINGFACE_CO_PREFIX, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + PYTORCH_PRETRAINED_BERT_CACHE, + PYTORCH_TRANSFORMERS_CACHE, + S3_BUCKET_PREFIX, + TRANSFORMERS_CACHE, + TRANSFORMERS_DYNAMIC_MODULE_NAME, + EntryNotFoundError, + PushInProgress, + PushToHubMixin, + RepositoryNotFoundError, + RevisionNotFoundError, + cached_file, + default_cache_path, + define_sagemaker_information, + download_url, + extract_commit_hash, + get_cached_models, + get_file_from_repo, + has_file, + http_user_agent, + is_offline_mode, + is_remote_url, + move_cache, + send_example_telemetry, + try_to_load_from_cache, +) +from .import_utils import ( + ENV_VARS_TRUE_AND_AUTO_VALUES, + ENV_VARS_TRUE_VALUES, + TORCH_FX_REQUIRED_VERSION, + USE_JAX, + USE_TF, + USE_TORCH, + DummyObject, + OptionalDependencyNotAvailable, + _LazyModule, + ccl_version, + direct_transformers_import, + get_torch_version, + is_accelerate_available, + is_apex_available, 
+ is_auto_awq_available, + is_auto_gptq_available, + is_bitsandbytes_available, + is_bs4_available, + is_coloredlogs_available, + is_cv2_available, + is_cython_available, + is_datasets_available, + is_decord_available, + is_detectron2_available, + is_essentia_available, + is_faiss_available, + is_flash_attn_2_available, + is_flash_attn_available, + is_flash_attn_greater_or_equal_2_10, + is_flax_available, + is_fsdp_available, + is_ftfy_available, + is_in_notebook, + is_ipex_available, + is_jieba_available, + is_jinja_available, + is_jumanpp_available, + is_kenlm_available, + is_keras_nlp_available, + is_levenshtein_available, + is_librosa_available, + is_natten_available, + is_ninja_available, + is_nltk_available, + is_onnx_available, + is_openai_available, + is_optimum_available, + is_pandas_available, + is_peft_available, + is_phonemizer_available, + is_pretty_midi_available, + is_protobuf_available, + is_psutil_available, + is_py3nvml_available, + is_pyctcdecode_available, + is_pytesseract_available, + is_pytest_available, + is_pytorch_quantization_available, + is_rjieba_available, + is_sacremoses_available, + is_safetensors_available, + is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_scipy_available, + is_sentencepiece_available, + is_seqio_available, + is_sklearn_available, + is_soundfile_availble, + is_spacy_available, + is_speech_available, + is_sudachi_available, + is_tensorflow_probability_available, + is_tensorflow_text_available, + is_tf2onnx_available, + is_tf_available, + is_timm_available, + is_tokenizers_available, + is_torch_available, + is_torch_bf16_available, + is_torch_bf16_available_on_device, + is_torch_bf16_cpu_available, + is_torch_bf16_gpu_available, + is_torch_compile_available, + is_torch_cuda_available, + is_torch_fp16_available_on_device, + is_torch_fx_available, + is_torch_fx_proxy, + is_torch_mps_available, + is_torch_neuroncore_available, + is_torch_npu_available, + is_torch_sdpa_available, + is_torch_tensorrt_fx_available, + is_torch_tf32_available, + is_torch_tpu_available, + is_torch_xpu_available, + is_torchaudio_available, + is_torchdistx_available, + is_torchdynamo_available, + is_torchvision_available, + is_training_run_on_sagemaker, + is_vision_available, + requires_backends, + tf_required, + torch_only_method, + torch_required, +) +from .peft_utils import ( + ADAPTER_CONFIG_NAME, + ADAPTER_SAFE_WEIGHTS_NAME, + ADAPTER_WEIGHTS_NAME, + check_peft_version, + find_adapter_config_file, +) + + +WEIGHTS_NAME = "pytorch_model.bin" +WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" +TF2_WEIGHTS_NAME = "tf_model.h5" +TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json" +TF_WEIGHTS_NAME = "model.ckpt" +FLAX_WEIGHTS_NAME = "flax_model.msgpack" +FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json" +SAFE_WEIGHTS_NAME = "model.safetensors" +SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json" +CONFIG_NAME = "config.json" +FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" +IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME +GENERATION_CONFIG_NAME = "generation_config.json" +MODEL_CARD_NAME = "modelcard.json" + +SENTENCEPIECE_UNDERLINE = "▁" +SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility + +MULTIPLE_CHOICE_DUMMY_INPUTS = [ + [[0, 1, 0, 1], [1, 0, 0, 1]] +] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. 
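+# Small fixed batches of token ids and attention masks used as default dummy model
+# inputs (e.g. for tracing); the exact values are arbitrary.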
+DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
+DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
+
+
+def check_min_version(min_version):
+    if version.parse(__version__) < version.parse(min_version):
+        if "dev" in min_version:
+            error_message = (
+                "This example requires a source install from HuggingFace Transformers (see "
+                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
+            )
+        else:
+            error_message = f"This example requires a minimum version of {min_version},"
+        error_message += f" but the version found is {__version__}.\n"
+        raise ImportError(
+            error_message
+            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
+            "versions of HuggingFace Transformers."
+        )
diff --git a/modified/utils/backbone_utils.py b/modified/utils/backbone_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2dc1d82470df26e7e680f48bbc36f3e9953dcd13
--- /dev/null
+++ b/modified/utils/backbone_utils.py
@@ -0,0 +1,271 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Collection of utils to be used by backbones and their components."""
+
+import enum
+import inspect
+from typing import Iterable, List, Optional, Tuple, Union
+
+
+class BackboneType(enum.Enum):
+    TIMM = "timm"
+    TRANSFORMERS = "transformers"
+
+
+def verify_out_features_out_indices(
+    out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]
+):
+    """
+    Verify that out_indices and out_features are valid for the given stage_names.
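+
+    Example (a minimal sketch; the stage names below are hypothetical):
+
+    ```python
+    stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
+    # valid: the indices and the features refer to the same stages, in the same order
+    verify_out_features_out_indices(["stage2", "stage4"], [2, 4], stage_names)
+    # invalid: the lengths differ, so this would raise a ValueError
+    # verify_out_features_out_indices(["stage2"], [2, 4], stage_names)
+    ```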
+ """ + if stage_names is None: + raise ValueError("Stage_names must be set for transformers backbones") + + if out_features is not None: + if not isinstance(out_features, (list,)): + raise ValueError(f"out_features must be a list {type(out_features)}") + if any(feat not in stage_names for feat in out_features): + raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}") + + if out_indices is not None: + if not isinstance(out_indices, (list, tuple)): + raise ValueError(f"out_indices must be a list or tuple, got {type(out_indices)}") + if any(idx >= len(stage_names) for idx in out_indices): + raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}") + + if out_features is not None and out_indices is not None: + if len(out_features) != len(out_indices): + raise ValueError("out_features and out_indices should have the same length if both are set") + if out_features != [stage_names[idx] for idx in out_indices]: + raise ValueError("out_features and out_indices should correspond to the same stages if both are set") + + +def _align_output_features_output_indices( + out_features: Optional[List[str]], + out_indices: Optional[Union[List[int], Tuple[int]]], + stage_names: List[str], +): + """ + Finds the corresponding `out_features` and `out_indices` for the given `stage_names`. + + The logic is as follows: + - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the + `out_indices`. + - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the + `out_features`. + - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage. + - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned. + + Args: + out_features (`List[str]`): The names of the features for the backbone to output. + out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output. + stage_names (`List[str]`): The names of the stages of the backbone. + """ + if out_indices is None and out_features is None: + out_indices = [len(stage_names) - 1] + out_features = [stage_names[-1]] + elif out_indices is None and out_features is not None: + out_indices = [stage_names.index(layer) for layer in out_features] + elif out_features is None and out_indices is not None: + out_features = [stage_names[idx] for idx in out_indices] + return out_features, out_indices + + +def get_aligned_output_features_output_indices( + out_features: Optional[List[str]], + out_indices: Optional[Union[List[int], Tuple[int]]], + stage_names: List[str], +) -> Tuple[List[str], List[int]]: + """ + Get the `out_features` and `out_indices` so that they are aligned. + + The logic is as follows: + - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the + `out_indices`. + - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the + `out_features`. + - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage. + - `out_indices` and `out_features` set: they are verified to be aligned. + + Args: + out_features (`List[str]`): The names of the features for the backbone to output. + out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output. + stage_names (`List[str]`): The names of the stages of the backbone. 
+ """ + # First verify that the out_features and out_indices are valid + verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names) + output_features, output_indices = _align_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=stage_names + ) + # Verify that the aligned out_features and out_indices are valid + verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names) + return output_features, output_indices + + +class BackboneMixin: + backbone_type: Optional[BackboneType] = None + + def _init_timm_backbone(self, config) -> None: + """ + Initialize the backbone model from timm The backbone must already be loaded to self._backbone + """ + if getattr(self, "_backbone", None) is None: + raise ValueError("self._backbone must be set before calling _init_timm_backbone") + + # These will diagree with the defaults for the transformers models e.g. for resnet50 + # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] + # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4'] + self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info] + self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info] + out_indices = self._backbone.feature_info.out_indices + out_features = self._backbone.feature_info.module_name() + + # We verify the out indices and out features are valid + verify_out_features_out_indices( + out_features=out_features, out_indices=out_indices, stage_names=self.stage_names + ) + self._out_features, self._out_indices = out_features, out_indices + + def _init_transformers_backbone(self, config) -> None: + stage_names = getattr(config, "stage_names") + out_features = getattr(config, "out_features", None) + out_indices = getattr(config, "out_indices", None) + + self.stage_names = stage_names + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=stage_names + ) + # Number of channels for each stage. This is set in the transformer backbone model init + self.num_features = None + + def _init_backbone(self, config) -> None: + """ + Method to initialize the backbone. This method is called by the constructor of the base class after the + pretrained model weights have been loaded. + """ + self.config = config + + self.use_timm_backbone = getattr(config, "use_timm_backbone", False) + self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS + + if self.backbone_type == BackboneType.TIMM: + self._init_timm_backbone(config) + elif self.backbone_type == BackboneType.TRANSFORMERS: + self._init_transformers_backbone(config) + else: + raise ValueError(f"backbone_type {self.backbone_type} not supported.") + + @property + def out_features(self): + return self._out_features + + @out_features.setter + def out_features(self, out_features: List[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. 
+ """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=None, stage_names=self.stage_names + ) + + @property + def out_indices(self): + return self._out_indices + + @out_indices.setter + def out_indices(self, out_indices: Union[Tuple[int], List[int]]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=None, out_indices=out_indices, stage_names=self.stage_names + ) + + @property + def out_feature_channels(self): + # the current backbones will output the number of channels for each stage + # even if that stage is not in the out_features list. + return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} + + @property + def channels(self): + return [self.out_feature_channels[name] for name in self.out_features] + + def forward_with_filtered_kwargs(self, *args, **kwargs): + signature = dict(inspect.signature(self.forward).parameters) + filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature} + return self(*args, **filtered_kwargs) + + def forward( + self, + pixel_values, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + raise NotImplementedError("This method should be implemented by the derived class.") + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to + include the `out_features` and `out_indices` attributes. + """ + output = super().to_dict() + output["out_features"] = output.pop("_out_features") + output["out_indices"] = output.pop("_out_indices") + return output + + +class BackboneConfigMixin: + """ + A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations. + """ + + @property + def out_features(self): + return self._out_features + + @out_features.setter + def out_features(self, out_features: List[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. + """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=None, stage_names=self.stage_names + ) + + @property + def out_indices(self): + return self._out_indices + + @out_indices.setter + def out_indices(self, out_indices: Union[Tuple[int], List[int]]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=None, out_indices=out_indices, stage_names=self.stage_names + ) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to + include the `out_features` and `out_indices` attributes. + """ + output = super().to_dict() + output["out_features"] = output.pop("_out_features") + output["out_indices"] = output.pop("_out_indices") + return output diff --git a/modified/utils/bitsandbytes.py b/modified/utils/bitsandbytes.py new file mode 100644 index 0000000000000000000000000000000000000000..71707cf5659909f7e28f939e91df6c48e64aba43 --- /dev/null +++ b/modified/utils/bitsandbytes.py @@ -0,0 +1,28 @@ +# Copyright 2023 The HuggingFace Team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings + + +warnings.warn( + "transformers.utils.bitsandbytes module is deprecated and will be removed in a future version. Please import bitsandbytes modules directly from transformers.integrations", + FutureWarning, +) + +from ..integrations import ( # noqa + get_keys_to_not_convert, + replace_8bit_linear, + replace_with_bnb_linear, + set_module_8bit_tensor_to_device, + set_module_quantized_tensor_to_device, +) diff --git a/modified/utils/constants.py b/modified/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fefd1b4601da04e073ff2880099ccaf87d0b1666 --- /dev/null +++ b/modified/utils/constants.py @@ -0,0 +1,6 @@ +IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406] +IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225] +IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5] +IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5] +OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] +OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711] diff --git a/modified/utils/doc.py b/modified/utils/doc.py new file mode 100644 index 0000000000000000000000000000000000000000..23679f31a3e2ec231e2c4c64ecdf9db5a5eb1c97 --- /dev/null +++ b/modified/utils/doc.py @@ -0,0 +1,1190 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Doc utilities: Utilities related to documentation +""" + +import functools +import re +import types + + +def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") + return fn + + return docstring_decorator + + +def add_start_docstrings_to_model_forward(*docstr): + def docstring_decorator(fn): + docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") + class_name = f"[`{fn.__qualname__.split('.')[0]}`]" + intro = f" The {class_name} forward method, overrides the `__call__` special method." + note = r""" + + + + Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] + instance afterwards instead of this since the former takes care of running the pre and post processing steps while + the latter silently ignores them. 
+
+
+"""
+
+        fn.__doc__ = intro + note + docstring
+        return fn
+
+    return docstring_decorator
+
+
+def add_end_docstrings(*docstr):
+    def docstring_decorator(fn):
+        fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
+        return fn
+
+    return docstring_decorator
+
+
+PT_RETURN_INTRODUCTION = r"""
+    Returns:
+        [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of
+        `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
+        elements depending on the configuration ([`{config_class}`]) and inputs.
+
+"""
+
+
+TF_RETURN_INTRODUCTION = r"""
+    Returns:
+        [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if
+        `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the
+        configuration ([`{config_class}`]) and inputs.
+
+"""
+
+
+def _get_indent(t):
+    """Returns the indentation in the first line of t"""
+    search = re.search(r"^(\s*)\S", t)
+    return "" if search is None else search.groups()[0]
+
+
+def _convert_output_args_doc(output_args_doc):
+    """Convert output_args_doc to display properly."""
+    # Split output_args_doc into blocks of argument/description
+    indent = _get_indent(output_args_doc)
+    blocks = []
+    current_block = ""
+    for line in output_args_doc.split("\n"):
+        # If the indent is the same as the beginning, the line is the name of a new arg.
+        if _get_indent(line) == indent:
+            if len(current_block) > 0:
+                blocks.append(current_block[:-1])
+            current_block = f"{line}\n"
+        else:
+            # Otherwise it's part of the description of the current arg.
+            # We need to remove 2 spaces from the indentation.
+            current_block += f"{line[2:]}\n"
+    blocks.append(current_block[:-1])
+
+    # Format each block for proper rendering
+    for i in range(len(blocks)):
+        blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
+        blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
+
+    return "\n".join(blocks)
+
+
+def _prepare_output_docstrings(output_type, config_class, min_indent=None):
+    """
+    Prepares the return part of the docstring using `output_type`.
+    """
+    output_docstring = output_type.__doc__
+
+    # Remove the head of the docstring to keep the list of args only
+    lines = output_docstring.split("\n")
+    i = 0
+    while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
+        i += 1
+    if i < len(lines):
+        params_docstring = "\n".join(lines[(i + 1) :])
+        params_docstring = _convert_output_args_doc(params_docstring)
+    else:
+        raise ValueError(
+            f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it has a "
+            "docstring that contains either `Args` or `Parameters`."
+        )
+
+    # Add the return introduction
+    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
+    intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
+    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
+    result = intro + params_docstring
+
+    # Apply minimum indent if necessary
+    if min_indent is not None:
+        lines = result.split("\n")
+        # Find the indent of the first nonempty line
+        i = 0
+        while len(lines[i]) == 0:
+            i += 1
+        indent = len(_get_indent(lines[i]))
+        # If too small, add indentation to all nonempty lines
+        if indent < min_indent:
+            to_add = " " * (min_indent - indent)
+            lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
+            result = "\n".join(lines)
+
+    return result
+
+
+FAKE_MODEL_DISCLAIMER = """
+    <Tip warning={true}>
+
+    This example uses a random model as the real ones are all very big. To get proper results, you should use
+    {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try
+    adding `device_map="auto"` in the `from_pretrained` call.
+
+    </Tip>
+"""
+
+
+PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import torch
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer(
+    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
+    ... )
+
+    >>> with torch.no_grad():
+    ...     logits = model(**inputs).logits
+
+    >>> predicted_token_class_ids = logits.argmax(-1)
+
+    >>> # Note that tokens are classified rather than input words, which means that
+    >>> # there might be more predicted token classes than words.
+    >>> # Multiple token classes might account for the same word.
+    >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
+    >>> predicted_tokens_classes
+    {expected_output}
+
+    >>> labels = predicted_token_class_ids
+    >>> loss = model(**inputs, labels=labels).loss
+    >>> round(loss.item(), 2)
+    {expected_loss}
+    ```
+"""
+
+PT_QUESTION_ANSWERING_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import torch
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+
+    >>> inputs = tokenizer(question, text, return_tensors="pt")
+    >>> with torch.no_grad():
+    ...
outputs = model(**inputs) + + >>> answer_start_index = outputs.start_logits.argmax() + >>> answer_end_index = outputs.end_logits.argmax() + + >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] + >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True) + {expected_output} + + >>> # target is "nice puppet" + >>> target_start_index = torch.tensor([{qa_target_start_index}]) + >>> target_end_index = torch.tensor([{qa_target_end_index}]) + + >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) + >>> loss = outputs.loss + >>> round(loss.item(), 2) + {expected_loss} + ``` +""" + +PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" + Example of single-label classification: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> predicted_class_id = logits.argmax().item() + >>> model.config.id2label[predicted_class_id] + {expected_output} + + >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` + >>> num_labels = len(model.config.id2label) + >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) + + >>> labels = torch.tensor([1]) + >>> loss = model(**inputs, labels=labels).loss + >>> round(loss.item(), 2) + {expected_loss} + ``` + + Example of multi-label classification: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5] + + >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` + >>> num_labels = len(model.config.id2label) + >>> model = {model_class}.from_pretrained( + ... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification" + ... ) + + >>> labels = torch.sum( + ... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1 + ... ).to(torch.float) + >>> loss = model(**inputs, labels=labels).loss + ``` +""" + +PT_MASKED_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") + + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits + + >>> # retrieve index of {mask} + >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] + + >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) + >>> tokenizer.decode(predicted_token_id) + {expected_output} + + >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] + >>> # mask labels of non-{mask} tokens + >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) + + >>> outputs = model(**inputs, labels=labels) + >>> round(outputs.loss.item(), 2) + {expected_loss} + ``` +""" + +PT_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +PT_MULTIPLE_CHOICE_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> choice0 = "It is eaten with a fork and a knife." + >>> choice1 = "It is eaten while held in the hand." + >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 + + >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) + >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 + + >>> # the linear classifier still needs to be trained + >>> loss = outputs.loss + >>> logits = outputs.logits + ``` +""" + +PT_CAUSAL_LM_SAMPLE = r""" + Example: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs, labels=inputs["input_ids"]) + >>> loss = outputs.loss + >>> logits = outputs.logits + ``` +""" + +PT_SPEECH_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> import torch + >>> from datasets import load_dataset + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> with torch.no_grad(): + ... 
outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +PT_SPEECH_CTC_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + >>> predicted_ids = torch.argmax(logits, dim=-1) + + >>> # transcribe speech + >>> transcription = processor.batch_decode(predicted_ids) + >>> transcription[0] + {expected_output} + + >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids + + >>> # compute loss + >>> loss = model(**inputs).loss + >>> round(loss.item(), 2) + {expected_loss} + ``` +""" + +PT_SPEECH_SEQ_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoFeatureExtractor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() + >>> predicted_label = model.config.id2label[predicted_class_ids] + >>> predicted_label + {expected_output} + + >>> # compute loss - target_label is e.g. "down" + >>> target_label = model.config.id2label[0] + >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) + >>> loss = model(**inputs).loss + >>> round(loss.item(), 2) + {expected_loss} + ``` +""" + + +PT_SPEECH_FRAME_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoFeatureExtractor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits + + >>> probabilities = torch.sigmoid(logits[0]) + >>> # labels is a one-hot array of shape (num_frames, num_speakers) + >>> labels = (probabilities > 0.5).long() + >>> labels[0].tolist() + {expected_output} + ``` +""" + + +PT_SPEECH_XVECTOR_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoFeatureExtractor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor( + ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True + ... ) + >>> with torch.no_grad(): + ... embeddings = model(**inputs).embeddings + + >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() + + >>> # the resulting embeddings can be used for cosine similarity-based retrieval + >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) + >>> similarity = cosine_sim(embeddings[0], embeddings[1]) + >>> threshold = 0.7 # the optimal threshold is dataset-dependent + >>> if similarity < threshold: + ... print("Speakers are not the same!") + >>> round(similarity.item(), 2) + {expected_output} + ``` +""" + +PT_VISION_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> import torch + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] + + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +PT_VISION_SEQ_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> import torch + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] + + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="pt") + + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits
+
+    >>> # model predicts one of the 1000 ImageNet classes
+    >>> predicted_label = logits.argmax(-1).item()
+    >>> print(model.config.id2label[predicted_label])
+    {expected_output}
+    ```
+"""
+
+
+PT_SAMPLE_DOCSTRINGS = {
+    "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
+    "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
+    "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
+    "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
+    "MaskedLM": PT_MASKED_LM_SAMPLE,
+    "LMHead": PT_CAUSAL_LM_SAMPLE,
+    "BaseModel": PT_BASE_MODEL_SAMPLE,
+    "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
+    "CTC": PT_SPEECH_CTC_SAMPLE,
+    "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
+    "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
+    "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
+    "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
+    "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
+}
+
+
+TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import tensorflow as tf
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer(
+    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf"
+    ... )
+
+    >>> logits = model(**inputs).logits
+    >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
+
+    >>> # Note that tokens are classified rather than input words, which means that
+    >>> # there might be more predicted token classes than words.
+    >>> # Multiple token classes might account for the same word.
+    >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
+    >>> predicted_tokens_classes
+    {expected_output}
+    ```
+
+    ```python
+    >>> labels = predicted_token_class_ids
+    >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss)
+    >>> round(float(loss), 2)
+    {expected_loss}
+    ```
+"""
+
+TF_QUESTION_ANSWERING_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import tensorflow as tf
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+
+    >>> inputs = tokenizer(question, text, return_tensors="tf")
+    >>> outputs = model(**inputs)
+
+    >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
+    >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
+
+    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
+    >>> tokenizer.decode(predict_answer_tokens)
+    {expected_output}
+    ```
+
+    ```python
+    >>> # target is "nice puppet"
+    >>> target_start_index = tf.constant([{qa_target_start_index}])
+    >>> target_end_index = tf.constant([{qa_target_end_index}])
+
+    >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
+    >>> loss = tf.math.reduce_mean(outputs.loss)
+    >>> round(float(loss), 2)
+    {expected_loss}
+    ```
+"""
+
+TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import tensorflow as tf
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+
+    >>> logits
= model(**inputs).logits + + >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) + >>> model.config.id2label[predicted_class_id] + {expected_output} + ``` + + ```python + >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` + >>> num_labels = len(model.config.id2label) + >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) + + >>> labels = tf.constant(1) + >>> loss = model(**inputs, labels=labels).loss + >>> round(float(loss), 2) + {expected_loss} + ``` +""" + +TF_MASKED_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") + >>> logits = model(**inputs).logits + + >>> # retrieve index of {mask} + >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0]) + >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index) + + >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1) + >>> tokenizer.decode(predicted_token_id) + {expected_output} + ``` + + ```python + >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] + >>> # mask labels of non-{mask} tokens + >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) + + >>> outputs = model(**inputs, labels=labels) + >>> round(float(outputs.loss), 2) + {expected_loss} + ``` +""" + +TF_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") + >>> outputs = model(inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +TF_MULTIPLE_CHOICE_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> choice0 = "It is eaten with a fork and a knife." + >>> choice1 = "It is eaten while held in the hand." 
+ + >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) + >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} + >>> outputs = model(inputs) # batch size is 1 + + >>> # the linear classifier still needs to be trained + >>> logits = outputs.logits + ``` +""" + +TF_CAUSAL_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") + >>> outputs = model(inputs) + >>> logits = outputs.logits + ``` +""" + +TF_SPEECH_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> from datasets import load_dataset + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +TF_SPEECH_CTC_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> from datasets import load_dataset + >>> import tensorflow as tf + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") + >>> logits = model(**inputs).logits + >>> predicted_ids = tf.math.argmax(logits, axis=-1) + + >>> # transcribe speech + >>> transcription = processor.batch_decode(predicted_ids) + >>> transcription[0] + {expected_output} + ``` + + ```python + >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids + + >>> # compute loss + >>> loss = model(**inputs).loss + >>> round(float(loss), 2) + {expected_loss} + ``` +""" + +TF_VISION_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] + + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="tf") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +TF_VISION_SEQ_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> import tensorflow as tf + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] 
+ + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="tf") + >>> logits = model(**inputs).logits + + >>> # model predicts one of the 1000 ImageNet classes + >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) + >>> print(model.config.id2label[predicted_label]) + {expected_output} + ``` +""" + +TF_SAMPLE_DOCSTRINGS = { + "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, + "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, + "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, + "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, + "MaskedLM": TF_MASKED_LM_SAMPLE, + "LMHead": TF_CAUSAL_LM_SAMPLE, + "BaseModel": TF_BASE_MODEL_SAMPLE, + "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, + "CTC": TF_SPEECH_CTC_SAMPLE, + "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, + "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, +} + + +FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") + + >>> outputs = model(**inputs) + >>> logits = outputs.logits + ``` +""" + +FLAX_QUESTION_ANSWERING_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" + >>> inputs = tokenizer(question, text, return_tensors="jax") + + >>> outputs = model(**inputs) + >>> start_scores = outputs.start_logits + >>> end_scores = outputs.end_logits + ``` +""" + +FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") + + >>> outputs = model(**inputs) + >>> logits = outputs.logits + ``` +""" + +FLAX_MASKED_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") + + >>> outputs = model(**inputs) + >>> logits = outputs.logits + ``` +""" + +FLAX_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +FLAX_MULTIPLE_CHOICE_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> choice0 = "It is eaten with a fork and a knife." 
+    >>> choice1 = "It is eaten while held in the hand."
+
+    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True)
+    >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})
+
+    >>> logits = outputs.logits
+    ```
+"""
+
+FLAX_CAUSAL_LM_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
+    >>> outputs = model(**inputs)
+
+    >>> # retrieve logits for the next token
+    >>> next_token_logits = outputs.logits[:, -1]
+    ```
+"""
+
+FLAX_SAMPLE_DOCSTRINGS = {
+    "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
+    "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
+    "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
+    "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
+    "MaskedLM": FLAX_MASKED_LM_SAMPLE,
+    "BaseModel": FLAX_BASE_MODEL_SAMPLE,
+    "LMHead": FLAX_CAUSAL_LM_SAMPLE,
+}
+
+
+def filter_outputs_from_example(docstring, **kwargs):
+    """
+    Removes the doctest-style output lines from a code sample when the corresponding expected value is `None`.
+    """
+    for key, value in kwargs.items():
+        if value is not None:
+            continue
+
+        doc_key = "{" + key + "}"
+        docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring)
+
+    return docstring
+
+
+def add_code_sample_docstrings(
+    *docstr,
+    processor_class=None,
+    checkpoint=None,
+    output_type=None,
+    config_class=None,
+    mask="[MASK]",
+    qa_target_start_index=14,
+    qa_target_end_index=15,
+    model_cls=None,
+    modality=None,
+    expected_output=None,
+    expected_loss=None,
+    real_checkpoint=None,
+    revision=None,
+):
+    def docstring_decorator(fn):
+        # model_class defaults to the function's class if not specified otherwise
+        model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls
+
+        if model_class[:2] == "TF":
+            sample_docstrings = TF_SAMPLE_DOCSTRINGS
+        elif model_class[:4] == "Flax":
+            sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
+        else:
+            sample_docstrings = PT_SAMPLE_DOCSTRINGS
+
+        # Putting all kwargs for the docstrings in a dict to be used with
+        # `.format(**doc_kwargs)`. Note that the dict may contain keys that do not
+        # appear in the formatted string, which is fine.
+        doc_kwargs = {
+            "model_class": model_class,
+            "processor_class": processor_class,
+            "checkpoint": checkpoint,
+            "mask": mask,
+            "qa_target_start_index": qa_target_start_index,
+            "qa_target_end_index": qa_target_end_index,
+            "expected_output": expected_output,
+            "expected_loss": expected_loss,
+            "real_checkpoint": real_checkpoint,
+            "fake_checkpoint": checkpoint,
+            "true": "{true}",  # For syntax that conflicts with formatting.
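+            # A note on the entry above: because "true" maps to the literal string
+            # "{true}", a `{true}` placeholder (as in the `<Tip warning={true}>` markup
+            # of FAKE_MODEL_DISCLAIMER) survives `.format(**doc_kwargs)` unchanged.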
+        }
+
+        if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio":
+            code_sample = sample_docstrings["AudioClassification"]
+        elif "SequenceClassification" in model_class:
+            code_sample = sample_docstrings["SequenceClassification"]
+        elif "QuestionAnswering" in model_class:
+            code_sample = sample_docstrings["QuestionAnswering"]
+        elif "TokenClassification" in model_class:
+            code_sample = sample_docstrings["TokenClassification"]
+        elif "MultipleChoice" in model_class:
+            code_sample = sample_docstrings["MultipleChoice"]
+        elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
+            code_sample = sample_docstrings["MaskedLM"]
+        elif "LMHead" in model_class or "CausalLM" in model_class:
+            code_sample = sample_docstrings["LMHead"]
+        elif "CTC" in model_class:
+            code_sample = sample_docstrings["CTC"]
+        elif "AudioFrameClassification" in model_class:
+            code_sample = sample_docstrings["AudioFrameClassification"]
+        elif "XVector" in model_class and modality == "audio":
+            code_sample = sample_docstrings["AudioXVector"]
+        elif "Model" in model_class and modality == "audio":
+            code_sample = sample_docstrings["SpeechBaseModel"]
+        elif "Model" in model_class and modality == "vision":
+            code_sample = sample_docstrings["VisionBaseModel"]
+        elif "Model" in model_class or "Encoder" in model_class:
+            code_sample = sample_docstrings["BaseModel"]
+        elif "ImageClassification" in model_class:
+            code_sample = sample_docstrings["ImageClassification"]
+        else:
+            raise ValueError(f"Docstring can't be built for model {model_class}")
+
+        code_sample = filter_outputs_from_example(
+            code_sample, expected_output=expected_output, expected_loss=expected_loss
+        )
+        if real_checkpoint is not None:
+            code_sample = FAKE_MODEL_DISCLAIMER + code_sample
+        func_doc = (fn.__doc__ or "") + "".join(docstr)
+        output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
+        built_doc = code_sample.format(**doc_kwargs)
+        if revision is not None:
+            if re.match(r"^refs/pr/\d+", revision):
+                raise ValueError(
+                    f"The provided revision '{revision}' is incorrect. It should point to"
+                    " a pull request reference on the hub like 'refs/pr/6'"
+                )
+            built_doc = built_doc.replace(
+                f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")'
+            )
+        fn.__doc__ = func_doc + output_doc + built_doc
+        return fn
+
+    return docstring_decorator
+
+
+def replace_return_docstrings(output_type=None, config_class=None):
+    def docstring_decorator(fn):
+        func_doc = fn.__doc__
+        lines = func_doc.split("\n")
+        i = 0
+        while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
+            i += 1
+        if i < len(lines):
+            indent = len(_get_indent(lines[i]))
+            lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
+            func_doc = "\n".join(lines)
+        else:
+            raise ValueError(
+                f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as a placeholder, "
+                f"current docstring is:\n{func_doc}"
+            )
+        fn.__doc__ = func_doc
+        return fn
+
+    return docstring_decorator
+
+
+def copy_func(f):
+    """Returns a copy of a function f."""
+    # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
+    g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)
+    g = functools.update_wrapper(g, f)
+    g.__kwdefaults__ = f.__kwdefaults__
+    return g
diff --git a/modified/utils/dummy_detectron2_objects.py b/modified/utils/dummy_detectron2_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..41dfb6f81d34ef2f18ad67ef46d25180ca7cd602
--- /dev/null
+++ b/modified/utils/dummy_detectron2_objects.py
@@ -0,0 +1,14 @@
+# This file is autogenerated by the command `make fix-copies`, do not edit.
+from ..utils import requires_backends
+
+
+LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LayoutLMv2Model:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["detectron2"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["detectron2"])
diff --git a/modified/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py b/modified/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6d75a6ec22e90427c972a753a24afd1a780758f
--- /dev/null
+++ b/modified/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
@@ -0,0 +1,23 @@
+# This file is autogenerated by the command `make fix-copies`, do not edit.
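+# A sketch of how these dummy objects behave (the snippet below is illustrative,
+# not part of the generated code): importing any of the classes that follow always
+# succeeds, but instantiating one without the listed backends installed raises an
+# informative ImportError through `requires_backends`, e.g.
+#
+#     Pop2PianoFeatureExtractor()  # raises ImportError unless essentia, librosa,
+#                                  # pretty_midi, scipy and torch are installed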
+from ..utils import DummyObject, requires_backends + + +class Pop2PianoFeatureExtractor(metaclass=DummyObject): + _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) + + +class Pop2PianoTokenizer(metaclass=DummyObject): + _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) + + +class Pop2PianoProcessor(metaclass=DummyObject): + _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) diff --git a/modified/utils/dummy_flax_objects.py b/modified/utils/dummy_flax_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf17e711556cb3eece3464e0452828f483da69d --- /dev/null +++ b/modified/utils/dummy_flax_objects.py @@ -0,0 +1,1370 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxForceTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGenerationMixin(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLogitsProcessorList(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLogitsWarper(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMinLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxSuppressTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxTemperatureLogitsWarper(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxTopKLogitsWarper(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxTopPLogitsWarper(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWhisperTimeStampLogitsProcessor(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxPreTrainedModel(metaclass=DummyObject): + 
_backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertForPreTraining(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAlbertPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None + + +FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None + + +FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None + + +FLAX_MODEL_FOR_MASKED_LM_MAPPING = None + + +FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None + + +FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None + + +FLAX_MODEL_FOR_PRETRAINING_MAPPING = None + + +FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None + + +FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None + + +FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None + + +FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None + + +FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None + + +FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None + + +FLAX_MODEL_MAPPING = None + + +class FlaxAutoModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForImageClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForPreTraining(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["flax"]) + + +class FlaxAutoModelForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxAutoModelForVision2Seq(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBartPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBeitForImageClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBeitForMaskedImageModeling(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBeitModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBeitPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForPreTraining(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBertPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForPreTraining(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBigBirdPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBlenderbotModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBlenderbotSmallModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBloomForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBloomModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxBloomPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPTextModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPTextModelWithProjection(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPVisionModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxDistilBertPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForPreTraining(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxElectraPreTrainedModel(metaclass=DummyObject): + 
_backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxEncoderDecoderModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPT2LMHeadModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPT2Model(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPT2PreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPTNeoForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPTNeoModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPTJForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPTJModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxGPTJPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLlamaForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLlamaModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLlamaPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLongT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLongT5Model(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxLongT5PreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMarianModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMarianMTModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMarianPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMBartForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMBartForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["flax"]) + + +class FlaxMBartModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMBartPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMT5EncoderModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxMT5Model(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxOPTForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxOPTModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxOPTPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxPegasusForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxPegasusModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxPegasusPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRegNetForImageClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRegNetModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRegNetPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxResNetForImageClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxResNetModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxResNetPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class 
FlaxRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxRoFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxT5EncoderModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxT5Model(metaclass=DummyObject): + 
_backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxT5PreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxVisionEncoderDecoderModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxVisionTextDualEncoderModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxViTForImageClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxViTModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxViTPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWav2Vec2ForCTC(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWav2Vec2Model(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWhisperForAudioClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWhisperModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxWhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXGLMForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXGLMModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXGLMPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class 
FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXLMRobertaModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + +class FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) diff --git a/modified/utils/dummy_keras_nlp_objects.py b/modified/utils/dummy_keras_nlp_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..c6bb86a6d9b49e78f8936f3c1eb3cfc8b8db7951 --- /dev/null +++ b/modified/utils/dummy_keras_nlp_objects.py @@ -0,0 +1,9 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class TFGPT2Tokenizer(metaclass=DummyObject): + _backends = ["keras_nlp"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["keras_nlp"]) diff --git a/modified/utils/dummy_music_objects.py b/modified/utils/dummy_music_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..89052be47c1d32bac5cbd6fceab183fc1d75d3bf --- /dev/null +++ b/modified/utils/dummy_music_objects.py @@ -0,0 +1,16 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class Pop2PianoFeatureExtractor(metaclass=DummyObject): + _backends = ["music"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["music"]) + + +class Pop2PianoTokenizer(metaclass=DummyObject): + _backends = ["music"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["music"]) diff --git a/modified/utils/dummy_pt_objects.py b/modified/utils/dummy_pt_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..f633c83765fac6f6b2ecbec11966655496949b3c --- /dev/null +++ b/modified/utils/dummy_pt_objects.py @@ -0,0 +1,9340 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class PyTorchBenchmark(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PyTorchBenchmarkArguments(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Cache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DynamicCache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SinkCache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GlueDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GlueDataTrainingArguments(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LineByLineTextDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LineByLineWithRefDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LineByLineWithSOPTextDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SquadDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SquadDataTrainingArguments(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TextDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TextDatasetForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeamScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeamSearchScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConstrainedBeamSearchScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Constraint(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConstraintListState(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DisjunctiveConstraint(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EpsilonLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EtaLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ExponentialDecayLengthPenalty(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ForceTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GenerationMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class HammingDiversityLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InfNanRemoveLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitNormalization(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitsProcessorList(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MaxLengthCriteria(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MaxTimeCriteria(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MinLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NoBadWordsLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NoRepeatNGramLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhrasalConstraint(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
PrefixConstrainedLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SequenceBiasLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StoppingCriteria(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StoppingCriteriaList(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SuppressTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TemperatureLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TopKLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TopPLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TypicalLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UnbatchedClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperTimeStampLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def top_k_top_p_filtering(*args, **kwargs): + requires_backends(top_k_top_p_filtering, ["torch"]) + + +class PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AlbertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + 
+ +class AlbertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_albert(*args, **kwargs): + requires_backends(load_tf_weights_in_albert, ["torch"]) + + +ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AlignModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlignPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlignTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlignVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AltCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AltCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AltCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AltCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ASTForAudioClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ASTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ASTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_AUDIO_XVECTOR_MAPPING = None + + +MODEL_FOR_BACKBONE_MAPPING = None + + +MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None + + +MODEL_FOR_CAUSAL_LM_MAPPING = None + + +MODEL_FOR_CTC_MAPPING = None + + +MODEL_FOR_DEPTH_ESTIMATION_MAPPING = None + + +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None + + +MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = None + + +MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = None + + +MODEL_FOR_MASK_GENERATION_MAPPING = None + + +MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None + + +MODEL_FOR_MASKED_LM_MAPPING = None + + +MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None + + +MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None + + +MODEL_FOR_OBJECT_DETECTION_MAPPING = None + + +MODEL_FOR_PRETRAINING_MAPPING = None + + +MODEL_FOR_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None + + +MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None + + +MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None + + +MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_TEXT_ENCODING_MAPPING = None + + +MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = None + + +MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None + + 
+MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None + + +MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = None + + +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_VISION_2_SEQ_MAPPING = None + + +MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None + + +MODEL_MAPPING = None + + +MODEL_WITH_LM_HEAD_MAPPING = None + + +class AutoBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForAudioClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForAudioXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForDepthEstimation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForImageSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForImageToImage(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForInstanceSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMaskGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForObjectDetection(metaclass=DummyObject): 
+ _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSeq2SeqLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSpeechSeq2Seq(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTableQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTextEncoding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTextToSpectrogram(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTextToWaveform(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForUniversalSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForVision2Seq(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForVisualQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForZeroShotImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForZeroShotObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelWithLMHead(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AutoformerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoformerPreTrainedModel(metaclass=DummyObject): + _backends = 
["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BARK_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BarkCausalModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkCoarseModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkFineModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkSemanticModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BART_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BartForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartPretrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PretrainedBartModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BeitBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["torch"]) + + +class BertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_bert(*args, **kwargs): + requires_backends(load_tf_weights_in_bert, ["torch"]) + + +class BertGenerationDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertGenerationEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertGenerationPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_bert_generation(*args, **kwargs): + requires_backends(load_tf_weights_in_bert_generation, ["torch"]) + + +BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BigBirdForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdLayer(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_big_bird(*args, **kwargs): + requires_backends(load_tf_weights_in_big_bird, ["torch"]) + + +BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BigBirdPegasusForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BioGptForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BitBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BitForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BlenderbotForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotModel(metaclass=DummyObject): + _backends = 
["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BlenderbotSmallForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotSmallModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotSmallPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BlipForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipForImageTextRetrieval(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Blip2ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2QFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2VisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BloomForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomForTokenClassification(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BridgeTowerForContrastiveLearning(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BROS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BrosForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosSpadeEEForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosSpadeELForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CamembertForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertPreTrainedModel(metaclass=DummyObject): + _backends 
= ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CanineForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CaninePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_canine(*args, **kwargs): + requires_backends(load_tf_weights_in_canine, ["torch"]) + + +CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ChineseCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ChineseCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ChineseCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ChineseCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ClapAudioModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapAudioModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapFeatureExtractor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapTextModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + 
+ +class CLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPTextModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPVisionModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CLIPSegForImageSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ClvpDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpModelForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CodeGenForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CodeGenModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CodeGenPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConditionalDetrForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConditionalDetrForSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConditionalDetrModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
ConditionalDetrPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConvBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_convbert(*args, **kwargs): + requires_backends(load_tf_weights_in_convbert, ["torch"]) + + +CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConvNextBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConvNextV2Backbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextV2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextV2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextV2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CpmAntForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CpmAntModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CpmAntPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + 
+class CTRLForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CTRLLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CTRLModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CTRLPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CvtForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CvtModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CvtPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DebertaForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DebertaV2ForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2ForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DecisionTransformerGPT2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DecisionTransformerGPT2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DecisionTransformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
DecisionTransformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DeformableDetrForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DeformableDetrModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DeformableDetrPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DeiTForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DeiTForImageClassificationWithTeacher(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DeiTForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DeiTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DeiTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MCTCTForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MCTCTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MCTCTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MMBTForClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MMBTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ModalEmbeddings(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenLlamaForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenLlamaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenLlamaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenLlamaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RetriBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RetriBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
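# ---------------------------------------------------------------------------
# Editorial aside (not part of the diff): the stubs in this hunk come from the
# auto-generated file transformers/utils/dummy_pt_objects.py. The sketch below
# is a simplified, self-contained illustration of how the pattern works; the
# real DummyObject / requires_backends in transformers.utils differ in detail,
# and ExampleModel is a hypothetical stand-in for any stub in the hunk.
# ---------------------------------------------------------------------------
import importlib.util


def requires_backends(obj, backends):
    """Raise a readable ImportError if any required backend is missing."""
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {missing}")


class DummyObject(type):
    """Metaclass: any non-underscore attribute access on the *class* triggers
    the backend check, so e.g. ExampleModel.from_pretrained(...) fails with a
    helpful ImportError rather than an AttributeError when torch is absent."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class ExampleModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Instantiating the stub also raises when torch is not installed.
        requires_backends(self, ["torch"])
# ---------------------------------------------------------------------------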
+TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class TrajectoryTransformerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])

[… identical stubs, as above, for each name below …]

[… TrajectoryTransformer: TrajectoryTransformerPreTrainedModel …]
[… TransfoXL: AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl …]
[… Van: VanForImageClassification, VanModel, VanPreTrainedModel …]
[… Deta: DetaForObjectDetection, DetaModel, DetaPreTrainedModel …]
[… Detr: DetrForObjectDetection, DetrForSegmentation, DetrModel, DetrPreTrainedModel …]
[… Dinat: DinatBackbone, DinatForImageClassification, DinatModel, DinatPreTrainedModel …]
[… Dinov2: Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model, Dinov2PreTrainedModel …]
[… DistilBert: DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel …]
[… DonutSwin: DonutSwinModel, DonutSwinPreTrainedModel …]
[… DPR (context-encoder, question-encoder and reader archive lists all None): DPRContextEncoder, DPRPretrainedContextEncoder, DPRPreTrainedModel, DPRPretrainedQuestionEncoder, DPRPretrainedReader, DPRQuestionEncoder, DPRReader …]
[… DPT: DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel …]
[… EfficientFormer: EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel …]
[… EfficientNet: EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel …]
[… Electra: ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra …]
[… Encodec: EncodecModel, EncodecPreTrainedModel; plus EncoderDecoderModel (no archive-list constant) …]
[… Ernie: ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel …]
[… ErnieM: ErnieMForInformationExtraction, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, ErnieMForSequenceClassification, ErnieMForTokenClassification, ErnieMModel, ErnieMPreTrainedModel …]
[… Esm: EsmFoldPreTrainedModel, EsmForMaskedLM, EsmForProteinFolding, EsmForSequenceClassification, EsmForTokenClassification, EsmModel, EsmPreTrainedModel …]
[… Falcon: FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel …]
[… Flaubert: FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertPreTrainedModel, FlaubertWithLMHeadModel …]
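# ---------------------------------------------------------------------------
# Editorial aside (not part of the diff): these dummies are only ever exported
# when torch is absent. A minimal sketch of the import-time switch, modelled
# on the pattern in transformers/__init__.py (EsmModel chosen arbitrarily from
# the stubs in this hunk):

from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: export the stub, which raises a helpful error on use
    from transformers.utils.dummy_pt_objects import EsmModel
else:
    # torch present: export the real implementation
    from transformers.models.esm import EsmModel
# ---------------------------------------------------------------------------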
+FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class FlavaForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FlavaImageCodebook(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FlavaImageModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FlavaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FlavaMultimodalModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FlavaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FlavaTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class FNetForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class FocalNetBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FocalNetForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FocalNetForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FocalNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FocalNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["torch"]) + + +class FSMTForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FSMTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PretrainedFSMTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class FunnelBaseModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FunnelPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_funnel(*args, **kwargs): + requires_backends(load_tf_weights_in_funnel, ["torch"]) + + +class FuyuForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class FuyuPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GitForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GitVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GLPNForDepthEstimation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GLPNModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GLPNPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): 
+ requires_backends(self, ["torch"]) + + +GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPT2DoubleHeadsModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPT2ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPT2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPT2ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPT2LMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPT2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPT2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_gpt2(*args, **kwargs): + requires_backends(load_tf_weights_in_gpt2, ["torch"]) + + +GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTBigCodeForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTBigCodeForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTBigCodeForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTBigCodeModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTBigCodePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTNeoForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_gpt_neo(*args, **kwargs): + requires_backends(load_tf_weights_in_gpt_neo, ["torch"]) + + +GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTNeoXForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
GPTNeoXForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXJapaneseLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXJapaneseModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTJForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTJForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTJForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTJModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTJPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTSanJapaneseModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTSanJapanesePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GraphormerForGraphClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GraphormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GraphormerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GroupViTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GroupViTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GroupViTTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GroupViTVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class HubertForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class HubertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class HubertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class HubertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class IBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class IdeficsForVisionText2Text(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IdeficsModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IdeficsPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class IdeficsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ImageGPTForCausalImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ImageGPTForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ImageGPTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ImageGPTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_imagegpt(*args, **kwargs): + requires_backends(load_tf_weights_in_imagegpt, ["torch"]) + + +INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class InformerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class InstructBlipForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InstructBlipPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InstructBlipQFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InstructBlipVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class JukeboxModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class JukeboxPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class JukeboxPrior(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class JukeboxVQVAE(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Kosmos2ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Kosmos2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Kosmos2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class LayoutLMForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMForTokenClassification(metaclass=DummyObject): + _backends = 
["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv2ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class LayoutLMv3ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv3ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv3ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv3Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LayoutLMv3PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +LED_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class LEDForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LEDForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LEDForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LEDModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LEDPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class LevitForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LevitForImageClassificationWithTeacher(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LevitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
+class LevitPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LiltForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LiltForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LiltForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LiltModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LiltPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LlamaForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LlamaForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LlamaModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LlamaPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LlavaForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LlavaPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LlavaProcessor(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LongformerForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongformerSelfAttention(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LongT5EncoderModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongT5ForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongT5Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LongT5PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LukeForEntityClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForEntityPairClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForEntitySpanClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukeModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LukePreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertEncoder(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertForPreTraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertVisualFeatureEncoder(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class LxmertXLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class M2M100ForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class M2M100Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class M2M100PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarianForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarianModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarianMTModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MarkupLMForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarkupLMForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarkupLMForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarkupLMModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MarkupLMPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class Mask2FormerForUniversalSegmentation(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Mask2FormerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Mask2FormerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MaskFormerForInstanceSegmentation(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MaskFormerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MaskFormerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MaskFormerSwinBackbone(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MBartForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MBartForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MBartForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MBartForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MBartModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MBartPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MegaForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegaPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MegatronBertForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForNextSentencePrediction(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForPreTraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MegatronBertPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MgpstrForSceneTextRecognition(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MgpstrModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MgpstrPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MistralForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MistralForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MistralModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MistralPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MixtralForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MixtralForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MixtralModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MixtralPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MobileBertForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertForNextSentencePrediction(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertForPreTraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileBertPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_mobilebert(*args, **kwargs):
+    requires_backends(load_tf_weights_in_mobilebert, ["torch"])
+
+
+MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MobileNetV1ForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileNetV1Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileNetV1PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_mobilenet_v1(*args, **kwargs):
+    requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"])
+
+
+MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MobileNetV2ForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileNetV2ForSemanticSegmentation(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileNetV2Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileNetV2PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_mobilenet_v2(*args, **kwargs):
+    requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"])
+
+
+MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MobileViTForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileViTForSemanticSegmentation(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileViTModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileViTPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MobileViTV2ForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileViTV2ForSemanticSegmentation(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileViTV2Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MobileViTV2PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MPNetForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MPNetPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MptForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MptForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MptForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MptForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MptModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MptPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MraForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MraForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MraForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MraForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MraForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MraModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MraPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MT5EncoderModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MT5ForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MT5ForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MT5ForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MT5Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MT5PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MusicgenForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MusicgenForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MusicgenModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MusicgenPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MusicgenProcessor(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class MvpForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MvpForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MvpForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MvpForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MvpModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class MvpPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class NatBackbone(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NatForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NatModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NatPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class NezhaForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaForNextSentencePrediction(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaForPreTraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NezhaPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class NllbMoeForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NllbMoeModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NllbMoePreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NllbMoeSparseMLP(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NllbMoeTop2Router(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class NystromformerForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class NystromformerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class OneFormerForUniversalSegmentation(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OneFormerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OneFormerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OpenAIGPTForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OpenAIGPTLMHeadModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OpenAIGPTModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OpenAIGPTPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_openai_gpt(*args, **kwargs):
+    requires_backends(load_tf_weights_in_openai_gpt, ["torch"])
+
+
+OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class OPTForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OPTForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OPTForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OPTModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OPTPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class Owlv2ForObjectDetection(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Owlv2Model(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Owlv2PreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Owlv2TextModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Owlv2VisionModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class OwlViTForObjectDetection(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OwlViTModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OwlViTPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OwlViTTextModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class OwlViTVisionModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PatchTSMixerForPrediction(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSMixerForPretraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSMixerForRegression(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSMixerForTimeSeriesClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSMixerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSMixerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PatchTSTForClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSTForPrediction(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSTForPretraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSTForRegression(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSTModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PatchTSTPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PegasusForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PegasusForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PegasusModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PegasusPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PegasusXForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PegasusXModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PegasusXPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverForImageClassificationFourier(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverForImageClassificationLearned(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverForMultimodalAutoencoding(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverForOpticalFlow(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PerceiverPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PersimmonForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PersimmonForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PersimmonModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PersimmonPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PhiForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PhiForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PhiForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PhiModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PhiPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class Pix2StructForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Pix2StructPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Pix2StructTextModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Pix2StructVisionModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PLBartForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PLBartForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PLBartForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PLBartModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PLBartPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PoolFormerForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PoolFormerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PoolFormerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class Pop2PianoForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class Pop2PianoPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class ProphetNetDecoder(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ProphetNetEncoder(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ProphetNetForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ProphetNetForConditionalGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ProphetNetModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ProphetNetPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+PVT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class PvtForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PvtModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class PvtPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class QDQBertForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertForNextSentencePrediction(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertLMHeadModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class QDQBertPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_qdqbert(*args, **kwargs):
+    requires_backends(load_tf_weights_in_qdqbert, ["torch"])
+
+
+class RagModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RagPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RagSequenceForGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RagTokenForGeneration(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RealmEmbedder(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RealmForOpenQA(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RealmKnowledgeAugEncoder(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RealmPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RealmReader(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RealmRetriever(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RealmScorer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_realm(*args, **kwargs):
+    requires_backends(load_tf_weights_in_realm, ["torch"])
+
+
+REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class ReformerAttention(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerModelWithLMHead(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ReformerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RegNetForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RegNetModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RegNetPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RemBertForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RemBertPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_rembert(*args, **kwargs):
+    requires_backends(load_tf_weights_in_rembert, ["torch"])
+
+
+RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class ResNetBackbone(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ResNetForImageClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ResNetModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class ResNetPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RobertaForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RobertaPreLayerNormForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RobertaPreLayerNormPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RoCBertForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertForPreTraining(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoCBertPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_roc_bert(*args, **kwargs):
+    requires_backends(load_tf_weights_in_roc_bert, ["torch"])
+
+
+ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RoFormerForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerForMaskedLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerForMultipleChoice(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerForQuestionAnswering(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerForSequenceClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerForTokenClassification(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerLayer(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RoFormerPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+def load_tf_weights_in_roformer(*args, **kwargs):
+    requires_backends(load_tf_weights_in_roformer, ["torch"])
+
+
+RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class RwkvForCausalLM(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RwkvModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+class RwkvPreTrainedModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
+SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class SamModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+
SamPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SeamlessM4TCodeHifiGan(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForSpeechToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForSpeechToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForTextToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForTextToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4THifiGan(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TTextToUnitForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TTextToUnitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SeamlessM4Tv2ForSpeechToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2ForSpeechToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2ForTextToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2ForTextToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SegformerDecodeHead(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +class SegformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SEWForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SEWDForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWDForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWDModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWDPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechEncoderDecoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Speech2TextForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2TextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2TextPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2Text2ForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2Text2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SpeechT5ForSpeechToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5ForSpeechToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5ForTextToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5HifiGan(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) 
+ + +class SpeechT5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SplinterForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SqueezeBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertModule(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SwiftFormerForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwiftFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwiftFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SwinBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +class SwinPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Swin2SRForImageSuperResolution(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swin2SRModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swin2SRPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Swinv2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swinv2ForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swinv2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swinv2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SwitchTransformersEncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersSparseMLP(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersTop1Router(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +T5_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class T5EncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_t5(*args, **kwargs): + 
requires_backends(load_tf_weights_in_t5, ["torch"]) + + +TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TableTransformerForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TableTransformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TableTransformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TapasForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_tapas(*args, **kwargs): + requires_backends(load_tf_weights_in_tapas, ["torch"]) + + +TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TimeSeriesTransformerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimeSeriesTransformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TimesformerForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimesformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimesformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimmBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TrOCRForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TrOCRPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TvltForAudioVisualClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvltForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["torch"]) + + +class TvltModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvltPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TvpForVideoGrounding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5EncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UniSpeechForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
UniSpeechSatModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UnivNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UperNetForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UperNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VideoMAEForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViltForImageAndTextRetrieval(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForImagesAndTextClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisionEncoderDecoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisionTextDualEncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VisualBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
+class VisualBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForVisualReasoning(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTHybridForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTHybridModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTHybridPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTMAEForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMAELayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMAEModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTMSNForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMSNModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMSNPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VitDetBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +class VitDetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitDetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VitMatteForImageMatting(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitMattePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VITS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VitsModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitsPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VivitForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VivitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VivitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
Wav2Vec2ConformerForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class WavLMForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class WhisperForAudioClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XGLMForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XGLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
XGLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMWithLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMProphetNetDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMRobertaForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
+class XLMRobertaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMRobertaXLForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLNetForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_xlnet(*args, **kwargs): + requires_backends(load_tf_weights_in_xlnet, ["torch"]) + + +XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XmodForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForMultipleChoice(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class YolosForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YolosModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YolosPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class YosoForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Adafactor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AdamW(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def get_constant_schedule(*args, **kwargs): + requires_backends(get_constant_schedule, ["torch"]) + + +def get_constant_schedule_with_warmup(*args, **kwargs): + requires_backends(get_constant_schedule_with_warmup, ["torch"]) + + +def get_cosine_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_schedule_with_warmup, ["torch"]) + + +def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) + + +def 
get_inverse_sqrt_schedule(*args, **kwargs): + requires_backends(get_inverse_sqrt_schedule, ["torch"]) + + +def get_linear_schedule_with_warmup(*args, **kwargs): + requires_backends(get_linear_schedule_with_warmup, ["torch"]) + + +def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): + requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) + + +def get_scheduler(*args, **kwargs): + requires_backends(get_scheduler, ["torch"]) + + +class Conv1D(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def apply_chunking_to_forward(*args, **kwargs): + requires_backends(apply_chunking_to_forward, ["torch"]) + + +def prune_layer(*args, **kwargs): + requires_backends(prune_layer, ["torch"]) + + +class Trainer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def torch_distributed_zero_first(*args, **kwargs): + requires_backends(torch_distributed_zero_first, ["torch"]) + + +class Seq2SeqTrainer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) diff --git a/modified/utils/dummy_sentencepiece_and_tokenizers_objects.py b/modified/utils/dummy_sentencepiece_and_tokenizers_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..38775330a81d91030f000e58c0e6035bba1c0f31 --- /dev/null +++ b/modified/utils/dummy_sentencepiece_and_tokenizers_objects.py @@ -0,0 +1,9 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +SLOW_TO_FAST_CONVERTERS = None + + +def convert_slow_tokenizer(*args, **kwargs): + requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"]) diff --git a/modified/utils/dummy_sentencepiece_objects.py b/modified/utils/dummy_sentencepiece_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..658645746329c697860b39fabaeb8e9fc721d910 --- /dev/null +++ b/modified/utils/dummy_sentencepiece_objects.py @@ -0,0 +1,233 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
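+# As in the other dummy_*_objects.py modules, every name below is an import-safe
+# placeholder: the `DummyObject` metaclass routes both instantiation and attribute
+# access through `requires_backends`, which raises an ImportError naming the
+# missing backend(s). For the combined sentencepiece/tokenizers module above, the
+# whole backend list is checked, so the error reports every dependency still to
+# be installed. Illustrative (not verbatim) behavior without sentencepiece:
+#
+#     >>> from transformers import AlbertTokenizer
+#     >>> AlbertTokenizer("spiece.model")
+#     ImportError: AlbertTokenizer requires the SentencePiece library but it was
+#     not found in your environment. ...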
+from ..utils import DummyObject, requires_backends + + +class AlbertTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BarthezTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BartphoTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BertGenerationTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BigBirdTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class CamembertTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class CodeLlamaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class CpmTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class DebertaV2Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class ErnieMTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class FNetTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class GPTSw3Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class LayoutXLMTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class LlamaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class M2M100Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MarianTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MBart50Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MBartTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MLukeTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MT5Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class NllbTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class PegasusTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class PLBartTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class ReformerTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class RemBertTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class SeamlessM4TTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class Speech2TextTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class SpeechT5Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class T5Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XGLMTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XLMProphetNetTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XLMRobertaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XLNetTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) diff --git a/modified/utils/dummy_speech_objects.py b/modified/utils/dummy_speech_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..0bf08ebea42b4595ae1f8bbc2afcddf0630dcf4b --- /dev/null +++ b/modified/utils/dummy_speech_objects.py @@ -0,0 +1,16 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class ASTFeatureExtractor(metaclass=DummyObject): + _backends = ["speech"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["speech"]) + + +class Speech2TextFeatureExtractor(metaclass=DummyObject): + _backends = ["speech"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["speech"]) diff --git a/modified/utils/dummy_tensorflow_text_objects.py b/modified/utils/dummy_tensorflow_text_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..70c7ad5cbf4077609e36592566e461c1a1ded28a --- /dev/null +++ b/modified/utils/dummy_tensorflow_text_objects.py @@ -0,0 +1,9 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
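+# This module guards the single object that needs the `tensorflow_text` backend;
+# importing `TFBertTokenizer` without it still succeeds, and only actually using
+# the class raises the ImportError with an installation hint, rather than failing
+# hard at `import transformers` time.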
+from ..utils import DummyObject, requires_backends + + +class TFBertTokenizer(metaclass=DummyObject): + _backends = ["tensorflow_text"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tensorflow_text"]) diff --git a/modified/utils/dummy_tf_objects.py b/modified/utils/dummy_tf_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..2099c18bcd71c5d5fe693c37bc85e9d90f4e6adb --- /dev/null +++ b/modified/utils/dummy_tf_objects.py @@ -0,0 +1,3002 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class TensorFlowBenchmarkArguments(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TensorFlowBenchmark(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFForcedBOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFForcedEOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFForceTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGenerationMixin(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLogitsProcessorList(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMinLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFNoBadWordsLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFNoRepeatNGramLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSuppressTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTemperatureLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTopKLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTopPLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +def tf_top_k_top_p_filtering(*args, **kwargs): + requires_backends(tf_top_k_top_p_filtering, 
["tf"]) + + +class KerasMetricCallback(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class PushToHubCallback(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSequenceSummary(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSharedEmbeddings(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +def shape_list(*args, **kwargs): + requires_backends(shape_list, ["tf"]) + + +TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFAlbertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_CAUSAL_LM_MAPPING = None + + +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + +TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_MASK_GENERATION_MAPPING = None + + +TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None + + +TF_MODEL_FOR_MASKED_LM_MAPPING = None + + +TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None + + +TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None + + +TF_MODEL_FOR_PRETRAINING_MAPPING = None + + +TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None + + +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None + + +TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None + + +TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None + + +TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None + + +TF_MODEL_FOR_TEXT_ENCODING_MAPPING = None + + +TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_VISION_2_SEQ_MAPPING = None + + +TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None + + +TF_MODEL_MAPPING = None + + +TF_MODEL_WITH_LM_HEAD_MAPPING = None + + +class TFAutoModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + 
+class TFAutoModelForAudioClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMaskedImageModeling(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMaskGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForNextSentencePrediction(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSeq2SeqLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSpeechSeq2Seq(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForTableQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForTextEncoding(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForVision2Seq(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForZeroShotImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelWithLMHead(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["tf"]) + + +class TFBartForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBartModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBartPretrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFBertEmbeddings(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotSmallModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotSmallPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFBlipForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFBlipForImageTextRetrieval(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCamembertForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCLIPModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCLIPTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCLIPVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFConvBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["tf"]) + + +class TFConvBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2ForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCTRLForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCTRLLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCTRLModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCTRLPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCvtForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCvtModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCvtPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDebertaForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class TFDebertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDebertaV2ForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDeiTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTForMaskedImageModeling(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFAdaptiveEmbedding(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFTransfoXLModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDistilBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDPRContextEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRPretrainedContextEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRPretrainedReader(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRQuestionEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRReader(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFEfficientFormerForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEfficientFormerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEfficientFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class 
TFElectraForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEncoderDecoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFEsmForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFFlaubertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertWithLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFFunnelBaseModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class TFFunnelForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFGPT2DoubleHeadsModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2LMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2MainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFGroupViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFGroupViTVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFHubertForCTC(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFHubertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFHubertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLayoutLMForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLEDForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLEDModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLEDPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLongformerForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForQuestionAnswering(metaclass=DummyObject): + 
_backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerSelfAttention(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLxmertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertVisualFeatureEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMarianModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMarianMTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMarianPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMBartModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMBartPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFMobileBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForSequenceClassification(metaclass=DummyObject): + 
_backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFMobileViTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFMPNetForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMT5EncoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMT5Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFOpenAIGPTLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOPTForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOPTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOPTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPegasusForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPegasusModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPegasusPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagSequenceForGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagTokenForGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRegNetForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRegNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRegNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRemBertForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForTokenClassification(metaclass=DummyObject): + _backends = 
["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFResNetForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFResNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFResNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRobertaForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, 
**kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRoFormerForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSamModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSamPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSegformerDecodeHead(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSpeech2TextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["tf"]) + + +class TFSpeech2TextPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSwinForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSwinForMaskedImageModeling(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSwinModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSwinPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFT5EncoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFT5Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFT5PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFTapasForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFVisionEncoderDecoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFVisionTextDualEncoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTMAEForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTMAEModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + 
+TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFWav2Vec2ForCTC(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWav2Vec2ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWav2Vec2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWav2Vec2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFWhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWhisperModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXGLMForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXGLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXGLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXLMForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMWithLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXLMRobertaForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFXLMRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXLNetForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class AdamWeightDecay(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class GradientAccumulator(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class WarmUp(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +def create_optimizer(*args, **kwargs): + requires_backends(create_optimizer, ["tf"]) + + +class TFTrainer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) diff --git a/modified/utils/dummy_tokenizers_objects.py b/modified/utils/dummy_tokenizers_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..b8cc21303a815ad1697d0d3dd9b643ed30fec4ef --- /dev/null +++ b/modified/utils/dummy_tokenizers_objects.py @@ -0,0 +1,429 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
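This file repeats the pattern for the `tokenizers` backend. As a hedged usage sketch (not part of the diff), this is what a user sees in an environment where the `tokenizers` package is missing:

# Illustration under the assumption that `tokenizers` is not installed.
try:
    from transformers import BertTokenizerFast  # succeeds: the dummy is importable

    BertTokenizerFast()  # any use runs requires_backends and fails loudly
except ImportError as err:
    print(err)  # message points the user at the missing `tokenizers` backend

This keeps `from transformers import *` working for users who installed only a subset of the optional dependencies.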
+from ..utils import DummyObject, requires_backends + + +class AlbertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BartTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BarthezTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BigBirdTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BlenderbotTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BlenderbotSmallTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BloomTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CamembertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CLIPTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CodeLlamaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CodeGenTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class ConvBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CpmTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DebertaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DebertaV2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RetriBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DistilBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DPRContextEncoderTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DPRQuestionEncoderTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DPRReaderTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class ElectraTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class FNetTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class FunnelTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GPT2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GPTNeoXTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GPTNeoXJapaneseTokenizer(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class HerbertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutLMv2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutLMv3TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutXLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LEDTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LlamaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LongformerTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LxmertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MarkupLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MBartTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MBart50TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MobileBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MPNetTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MT5TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tokenizers"]) + + +class MvpTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class NllbTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class NougatTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class OpenAIGPTTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class PegasusTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RealmTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class ReformerTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RemBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RobertaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RoFormerTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class SeamlessM4TTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class SplinterTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class SqueezeBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class T5TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class WhisperTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class XGLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class XLMRobertaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class XLNetTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class PreTrainedTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) diff --git a/modified/utils/dummy_vision_objects.py b/modified/utils/dummy_vision_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..f1a10ff5710ad0e06d32b677ce6b649eca6d69a8 --- /dev/null +++ b/modified/utils/dummy_vision_objects.py @@ -0,0 +1,576 @@ +# This file is autogenerated by the 
command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class ImageProcessingMixin(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ImageFeatureExtractionMixin(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BeitFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BeitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BlipImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BridgeTowerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ChineseCLIPFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ChineseCLIPImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class CLIPFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class CLIPImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConditionalDetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConditionalDetrImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConvNextFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConvNextImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeformableDetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeformableDetrImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeiTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeiTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DetaImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class 
DetrImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DonutFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DonutImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DPTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DPTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class EfficientFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class EfficientNetImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FlavaFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FlavaImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FlavaProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FuyuImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FuyuProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class GLPNFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class GLPNImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class IdeficsImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ImageGPTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ImageGPTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv2FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv3FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv3ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LevitFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class 
LevitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Mask2FormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MaskFormerFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MaskFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV1FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV1ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV2FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileViTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileViTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class NougatImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class OneFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Owlv2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class OwlViTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class OwlViTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PerceiverFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PerceiverImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Pix2StructImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PoolFormerFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PoolFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PvtImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SamImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["vision"]) + + +class SegformerFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SegformerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Swin2SRImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class TvltImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class TvpImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VideoMAEFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VideoMAEImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViltFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViltImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViltProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViTHybridImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VitMatteImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VivitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class YolosFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class YolosImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) diff --git a/modified/utils/fx.py b/modified/utils/fx.py new file mode 100644 index 0000000000000000000000000000000000000000..1559da0e53c68a3b167b0247283ffcc7f5d8b603 --- /dev/null +++ b/modified/utils/fx.py @@ -0,0 +1,1260 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import builtins +import collections +import functools +import inspect +import math +import operator +import os +import random +import warnings +from typing import Any, Callable, Dict, List, Optional, Type, Union + +import torch +from torch import nn +from torch.fx import Graph, GraphModule, Proxy, Tracer +from torch.fx._compatibility import compatibility +from torch.fx.proxy import ParameterProxy + +from .. import PretrainedConfig, PreTrainedModel, logging +from ..models.auto import get_values +from ..models.auto.modeling_auto import ( + MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_BACKBONE_MAPPING_NAMES, + MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_CTC_MAPPING_NAMES, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, + MODEL_FOR_MASKED_LM_MAPPING_NAMES, + MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, + MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, + MODEL_FOR_PRETRAINING_MAPPING_NAMES, + MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, + MODEL_MAPPING_NAMES, +) +from ..utils import ( + ENV_VARS_TRUE_VALUES, + TORCH_FX_REQUIRED_VERSION, + get_torch_version, + is_peft_available, + is_torch_fx_available, +) + + +if is_peft_available(): + from peft import PeftModel + + +logger = logging.get_logger(__name__) +_IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES + + +def _generate_supported_model_class_names( + model_name: Type[PretrainedConfig], + supported_tasks: Optional[Union[str, List[str]]] = None, +) -> List[str]: + task_mapping = { + "default": MODEL_MAPPING_NAMES, + "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES, + "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, + "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES, + "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, + "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, + "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, + "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, + "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + "masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, + "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, + "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, + "ctc": MODEL_FOR_CTC_MAPPING_NAMES, + "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, + "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, + "backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES, + } + + if supported_tasks is None: + supported_tasks = task_mapping.keys() + if isinstance(supported_tasks, str): + supported_tasks = [supported_tasks] + + model_class_names = [] + for task in supported_tasks: + class_name = task_mapping[task].get(model_name, None) + if class_name: + 
model_class_names.append(class_name)
+
+    return model_class_names
+
+
+_REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [
+    "altclip",
+    "albert",
+    "bart",
+    "bert",
+    "blenderbot",
+    "blenderbot-small",
+    "bloom",
+    "clip",
+    "convnext",
+    "deberta",
+    "deberta-v2",
+    "dinov2",
+    "distilbert",
+    "donut-swin",
+    "electra",
+    "gpt2",
+    "gpt_neo",
+    "gptj",
+    "hubert",
+    "layoutlm",
+    "lxmert",
+    "m2m_100",
+    "marian",
+    "mbart",
+    "megatron-bert",
+    "mobilebert",
+    "mt5",
+    "nezha",
+    "opt",
+    "pegasus",
+    "plbart",
+    "resnet",
+    "roberta",
+    "segformer",
+    "speech_to_text",
+    "speech_to_text_2",
+    "swin",
+    "t5",
+    "trocr",
+    "vit",
+    "xglm",
+    "wav2vec2",
+    # "xlnet",
+]
+
+_REGULAR_SUPPORTED_MODELS = []
+for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS:
+    if isinstance(item, dict):
+        _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item))
+    else:
+        _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item))
+
+_SPECIAL_SUPPORTED_MODELS = [
+    "CLIPTextModel",
+    "CLIPTextModelWithProjection",
+    "CLIPVisionModel",
+    "CLIPVisionModelWithProjection",
+    "AltCLIPTextModel",
+    "AltCLIPVisionModel",
+    "GitVisionModel",
+    "GPT2DoubleHeadsModel",
+    "Speech2Text2Decoder",
+    "TrOCRDecoder",
+    "PeftModelForCausalLM",
+    "PeftModelForSeq2SeqLM",
+    # TODO: add support for them as it should be quite easy to do so (small blocking issues).
+    # XLNetForQuestionAnswering,
+]
+_SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS)))
+
+
+def torch_nn_embedding(self, input):
+    return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype)
+
+
+def torch_nn_functional_embedding(
+    input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False
+):
+    return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype)
+
+
+def torch_nn_layernorm(self, input):
+    return input
+
+
+def torch_nn_groupnorm(self, input):
+    return input
+
+
+def torch_nn_linear(self, input):
+    return torch.empty(input.shape[:-1] + (self.out_features,), device="meta")
+
+
+def torch_relu(x):
+    return x
+
+
+def torch_nn_relu(self, x):
+    return x
+
+
+def torch_nn_functional_relu(x, inplace=False):
+    # Only the in-place variant is unsupported, as the error message states.
+    if inplace:
+        raise ValueError("Don't support in-place functional.relu for MetaTensor analysis")
+    return x
+
+
+def torch_where(condition, x, y):
+    # torch.where returns the broadcasted tensor of condition, x, and y,
+    # so hack it by using addition
+    return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta")
+
+
+def torch_abs(input, *, out=None):
+    if out is not None:
+        raise ValueError("Don't support in-place abs for MetaTensor analysis")
+    return input
+
+
+def torch_arange(*args, **kwargs):
+    n = len(args)
+    step = 1
+    if n == 1:
+        start = 0
+        end = args[0]
+    elif n == 2:
+        start, end = args
+    else:
+        start, end, step = args
+    if isinstance(start, float):
+        start = int(start)
+    if isinstance(end, float):
+        end = int(end)
+    if isinstance(step, float):
+        step = int(step)
+    step = kwargs.get("step", step)
+    dtype = kwargs.get("dtype")
+    return torch.empty((end - start) // step, dtype=dtype, device="meta")
+
+
+def torch_full(*args, **kwargs):
+    args = list(args)
+    # A "meta" fill value carries no concrete data, so substitute an arbitrary
+    # scalar: only the shape and dtype of the result matter here.
+    if isinstance(args[1], torch.Tensor) and args[1].device == torch.device("meta"):
+        args[1] = 1  # Any value.
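+    # Drop any `device` kwarg (during tracing it may name the "meta" device) and
+    # build the tensor normally; `create_proxy` moves call_function outputs to
+    # "meta" afterwards, so only the shape and dtype of this result survive.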
+    kwargs_without_device = dict(kwargs)
+    kwargs_without_device.pop("device", None)
+    return torch.full(*args, **kwargs_without_device)
+
+
+def torch_cat(tensors, dim=None, axis=None, *, out=None):
+    if dim is None and axis is None:
+        dim = 0
+    if dim is None and axis is not None:
+        dim = axis
+    if dim < 0:
+        dim = tensors[0].dim() + dim
+    shapes = [t.shape for t in tensors]
+    shape = list(shapes[0])
+    concatenated_dim = sum(shape[dim] for shape in shapes)
+    final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :]
+    return torch.empty(final_shape, device="meta")
+
+
+def torch_stack(tensors, dim=None, axis=None, *, out=None):
+    if dim is None and axis is None:
+        dim = 0
+    if dim is None and axis is not None:
+        dim = axis
+    if dim < 0:
+        dim = tensors[0].dim() + 1 + dim
+    shape = list(tensors[0].shape)
+    shape.insert(dim, len(tensors))
+    return torch.empty(shape, device="meta")
+
+
+def torch_add(input, other, *, alpha=1, out=None):
+    if not isinstance(input, torch.Tensor):
+        return torch.empty_like(other, device="meta")
+    if not isinstance(other, torch.Tensor):
+        return torch.empty_like(input, device="meta")
+    max_length = max(input.dim(), other.dim())
+    # Broadcasting aligns shapes from the trailing dimensions, so pad on the left.
+    input_shape = [1] * (max_length - input.dim()) + list(input.shape)
+    other_shape = [1] * (max_length - other.dim()) + list(other.shape)
+    shape = []
+    for i in range(max_length):
+        shape.append(max(input_shape[i], other_shape[i]))
+    return torch.empty(shape, device="meta")
+
+
+def torch_mul(input, other, *, out=None):
+    return torch_add(input, other, out=out)
+
+
+def torch_tensor_mul(self, other):
+    return torch_mul(self, other)
+
+
+def torch_matmul(input, other, *, out=None):
+    d1 = input.dim()
+    d2 = other.dim()
+    shape = None
+    if d1 == 1 and d2 == 1:
+        shape = None
+    elif d1 == 2 and d2 == 2:
+        shape = (input.size(0), other.size(1))
+    elif d1 == 1 and d2 == 2:
+        shape = (other.size(1),)
+    elif d1 == 2 and d2 == 1:
+        shape = (input.size(0),)
+    else:
+        max_length = max(input.dim(), other.dim())
+        shape1 = list(input.shape)
+        shape2 = list(other.shape)
+        if d1 == 1:
+            shape1 = [1] + shape1
+        if d2 == 1:
+            shape2.append(1)
+        shape1 = [-1] * (max_length - d1) + list(input.shape)
+        shape2 = [-1] * (max_length - d2) + list(other.shape)
+        shape = []
+        for i in range(max_length):
+            shape.append(max(shape1[i], shape2[i]))
+        shape[-2] = shape1[-2]
+        shape[-1] = shape2[-1]
+        if d1 == 1:
+            shape.pop(-2)
+        if d2 == 1:
+            shape.pop(-1)
+    if shape is None:
+        return torch.tensor(0.0, device="meta")
+    return torch.empty(*shape, device="meta")
+
+
+def torch_bmm(input, mat2, *, out=None):
+    if out is not None:
+        raise ValueError("Don't support in-place bmm for MetaTensor analysis")
+    batch_size, n, m = input.shape
+    _, _, p = mat2.shape
+    return torch.empty(batch_size, n, p, device="meta")
+
+
+def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
+    if out is not None:
+        raise ValueError("Don't support in-place baddbmm for MetaTensor analysis")
+    return torch_bmm(batch1, batch2)
+
+
+def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):
+    return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)
+
+
+def torch_einsum(equation, *operands):
+    # TODO: infer shape without performing the computation, this might be quite hard.
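+    # Shape-inference fallback: run the real einsum once on cheap CPU stand-ins
+    # of the operands, then move the result to "meta" so only shape/dtype are kept.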
+    concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands)
+    return torch.einsum(equation, *concrete_operands).to("meta")
+
+
+def torch_tensor_repeat(self, *sizes):
+    shape = list(self.shape)
+    for i, x in enumerate(sizes):
+        shape[i] *= x
+    return torch.empty(shape, device="meta")
+
+
+def torch_repeat_interleave(*args, dim=None, output_size=None):
+    num_args = len(args)
+    if num_args == 1:
+        shape = [output_size if output_size is not None else args[0].sum()]
+    else:
+        shape = list(args[0].shape)
+        if dim is None:
+            if num_args > 2:
+                dim = args[2]
+            else:
+                shape = [sum(shape)]
+                dim = 0
+        repeats = args[1]
+        if isinstance(repeats, int) or torch.numel(repeats) == 1:
+            shape[dim] *= int(repeats)
+        else:
+            shape[dim] = output_size if output_size is not None else repeats.sum()
+    return torch.empty(*shape, device="meta")
+
+
+def torch_index_select(input, dim, index, *, out=None):
+    shape = list(input.shape)
+    shape[dim] = len(index)
+    return torch.empty(*shape, device="meta")
+
+
+def torch_tensor_index_select(self, dim, index):
+    return torch_index_select(self, dim, index)
+
+
+def torch_gather(input, dim, index, *, sparse_grad=False, out=None):
+    shape = list(input.shape)
+    shape[dim] = index.shape[dim]
+    return torch.empty(*shape, device="meta")
+
+
+def torch_tensor_gather(self, dim, index):
+    return torch_gather(self, dim, index)
+
+
+def torch_roll(input, shifts, dims=None):
+    return input
+
+
+def torch_flip(input, dims):
+    return input
+
+
+def torch_tensor_flip(self, dims):
+    return self
+
+
+def torch_nn_conv1d(self, input):
+    l_in = input.shape[-1]
+    shape = None
+    padding = self.padding
+    if padding == "valid":
+        padding = (0, 0)
+    if padding == "same":
+        shape = list(input.shape)
+    if shape is None:
+        shape = list(input.shape)
+        l_out = math.floor(
+            (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
+        )
+        shape[-1] = l_out
+    shape[-2] = self.out_channels
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_conv2d(self, input):
+    h_in, w_in = input.shape[-2:]
+    shape = None
+    padding = self.padding
+    if padding == "valid":
+        padding = (0, 0)
+    if padding == "same":
+        shape = list(input.shape)
+    if shape is None:
+        shape = list(input.shape)
+        h_out = math.floor(
+            (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
+        )
+        w_out = math.floor(
+            (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1
+        )
+        shape[-2:] = [h_out, w_out]
+    shape[-3] = self.out_channels
+    return torch.empty(shape, device="meta")
+
+
+def torch_squeeze(input, dim=None):
+    shape = list(input.shape)
+    if dim is not None:
+        if dim < 0:
+            dim = input.dim() + dim
+        if shape[dim] == 1:
+            shape.pop(dim)
+    else:
+        new_shape = []
+        for dim_value in shape:
+            if dim_value == 1:
+                continue
+            new_shape.append(dim_value)
+        shape = new_shape
+    return torch.empty(shape, device="meta")
+
+
+def torch_tensor_squeeze(self, dim=None):
+    return torch_squeeze(self, dim)
+
+
+def torch_unsqueeze(input, dim):
+    shape = list(input.shape)
+    if dim < 0:
+        dim = input.dim() + 1 + dim
+    shape.insert(dim, 1)
+    return torch.empty(shape, device="meta")
+
+
+def torch_tensor_unsqueeze(self, dim):
+    return torch_unsqueeze(self, dim)
+
+
+def torch_unique_consecutive(input, **kwargs):
+    output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
+    if isinstance(output, torch.Tensor):
+        return output.to("meta")
+    else:
+        return tuple(map(lambda x: x.to("meta"), output))
+
+
+def torch_nn_functional_one_hot(tensor, num_classes=-1):
+    if num_classes < 0:
+        raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
+    shape = list(tensor.shape) + [num_classes]
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_mseloss(self, input, target):
+    if self.reduction == "none":
+        shape = target.shape
+    else:
+        shape = (1,)
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_crossentropyloss(self, input, target):
+    if self.reduction == "none":
+        shape = target.shape
+    else:
+        shape = (1,)
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_bcewithlogitsloss(self, input, target):
+    if self.reduction == "none":
+        shape = target.shape
+    else:
+        shape = (1,)
+    return torch.empty(shape, device="meta")
+
+
+def operator_getitem(a, b):
+    def to_concrete(t):
+        if isinstance(t, torch.Tensor):
+            concrete = torch.ones_like(t, device="cpu")
+            if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]:
+                concrete = concrete.to(torch.int64)
+            return concrete
+        return t
+
+    if isinstance(a, torch.Tensor):
+        # TODO: infer shape without performing the computation.
+        if isinstance(b, tuple):
+            b = tuple(map(to_concrete, b))
+        else:
+            b = to_concrete(b)
+        return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta")
+    return operator.getitem(a, b)
+
+
+_MANUAL_META_OVERRIDES: Dict[Callable, Callable] = {
+    torch.nn.Embedding: torch_nn_embedding,
+    torch.nn.functional.embedding: torch_nn_functional_embedding,
+    torch.nn.LayerNorm: torch_nn_layernorm,
+    torch.nn.GroupNorm: torch_nn_groupnorm,
+    torch.nn.Linear: torch_nn_linear,
+    torch.relu: torch_relu,
+    torch.nn.functional.relu: torch_nn_functional_relu,
+    torch.nn.ReLU: torch_nn_relu,
+    torch.where: torch_where,
+    torch.abs: torch_abs,
+    torch.arange: torch_arange,
+    torch.full: torch_full,
+    torch.cat: torch_cat,
+    torch.stack: torch_stack,
+    torch.add: torch_add,
+    torch.mul: torch_mul,
+    torch.Tensor.mul: torch_tensor_mul,
+    torch.matmul: torch_matmul,
+    torch.bmm: torch_bmm,
+    torch.baddbmm: torch_baddbmm,
+    torch.Tensor.baddbmm: torch_tensor_baddbmm,
+    torch.einsum: torch_einsum,
+    torch.Tensor.repeat: torch_tensor_repeat,
+    torch.repeat_interleave: torch_repeat_interleave,
+    torch.roll: torch_roll,
+    torch.flip: torch_flip,
+    torch.Tensor.flip: torch_tensor_flip,
+    torch.index_select: torch_index_select,
+    torch.Tensor.index_select: torch_tensor_index_select,
+    torch.gather: torch_gather,
+    torch.Tensor.gather: torch_tensor_gather,
+    torch.nn.Conv1d: torch_nn_conv1d,
+    torch.nn.Conv2d: torch_nn_conv2d,
+    torch.squeeze: torch_squeeze,
+    torch.Tensor.squeeze: torch_tensor_squeeze,
+    torch.unsqueeze: torch_unsqueeze,
+    torch.Tensor.unsqueeze: torch_tensor_unsqueeze,
+    torch.unique_consecutive: torch_unique_consecutive,
+    torch.nn.functional.one_hot: torch_nn_functional_one_hot,
+    torch.nn.MSELoss: torch_nn_mseloss,
+    torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss,
+    torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss,
+    operator.getitem: operator_getitem,
+}
+
+
+class HFProxy(Proxy):
+    """
+    Proxy that uses metadata to handle data-dependent control-flow.
+    """
+
+    def install_metadata(self, metadata):
+        self._metadata = metadata
+
+    @property
+    def shape(self):
+        return self.tracer.create_proxy("call_method", "size", (self,), {})
+
+    @property
+    def device(self):
+        # Hack so we can track when devices are used.
During meta-tensor propagation, + # replace these values with a constant 'meta' + return MetaDeviceAttribute(self, "device") + + def __len__(self): + if hasattr(self, "_metadata") and self._metadata is not None: + return len(self._metadata) + return super().__len__() + + def __bool__(self): + if hasattr(self, "_metadata") and self._metadata is not None: + return self._metadata + return super().__bool__() + + def __getattr__(self, k): + if k == "_metadata": + return self.__getattribute__(k) + # note: not added to the graph yet, if this is a method call + # we peephole optimize to the method invocation + return HFAttribute(self, k) + + def __setitem__(self, indices, values): + return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {}) + + def __contains__(self, key): + if hasattr(self, "_metadata") and self._metadata is not None: + return key in self._metadata + return super().__contains__(key) + + +class HFAttribute(HFProxy): + def __init__(self, root, attr: str): + self.root = root + self.attr = attr + self.tracer = root.tracer + self._node = None + + if hasattr(self.root, "_metadata"): + self.install_metadata(getattr(self.root._metadata, attr)) + + @property + def node(self): + # the node for attributes is added lazily, since most will just be method calls + # which do not rely on the getitem call + if self._node is None: + self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node + return self._node + + def __call__(self, *args, **kwargs): + return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs) + + +class MetaDeviceAttribute(HFAttribute): + pass + + +def _proxies_to_metas(v): + """Returns the underlying metadata for HFProxies, and behaves like the identity for the others.""" + if isinstance(v, MetaDeviceAttribute): + return "meta" + if isinstance(v, torch.fx.Proxy): + if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")): + raise RuntimeError(f"No metadata was found for {v}") + return v._metadata + return v + + +def _gen_constructor_wrapper(target): + @functools.wraps(target) + def wrapper(*args, **kwargs): + proxy = None + + def check_has_proxy(v): + if isinstance(v, Proxy): + nonlocal proxy + proxy = v + + torch.fx.node.map_aggregate(args, check_has_proxy) + torch.fx.node.map_aggregate(kwargs, check_has_proxy) + + if proxy is not None: + return proxy.tracer.create_proxy("call_function", target, args, kwargs) + else: + return target(*args, **kwargs) + + return wrapper, target + + +def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None): + if forbidden_values is None: + forbidden_values = [] + value = random.randint(low, high) + while value in forbidden_values: + value = random.randint(low, high) + return value + + +class HFTracer(Tracer): + """ + Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the + regular PyTorch torch.fx.Proxy. 
+ """ + + # Feature flag for proxying accesses to buffer values + proxy_buffer_attributes: bool = True + allow_insert_stateless_mods: bool = True + _TORCH_METHODS_TO_PATCH = [ + "arange", + "zeros", + "ones", + "full", + "full_like", + "eye", + "empty", + "tensor", + "clamp", + "finfo", + ] + supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) + + def __init__(self, autowrap_modules=(math,), autowrap_functions=()): + super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions) + + if not is_torch_fx_available(): + raise ImportError( + f"Found an incompatible version of torch. Found version {get_torch_version()}, but only version " + f"{TORCH_FX_REQUIRED_VERSION} is supported." + ) + + def _generate_dummy_input( + self, model: PreTrainedModel, input_name: str, shape: List[int] + ) -> Dict[str, torch.Tensor]: + """Generates dummy input for model inference recording.""" + # Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored + # from pickle, or from the "__class__" attribute in the general case. + model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__ + device = model.device + inputs_dict = {} + + if input_name in ["labels", "start_positions", "end_positions"]: + batch_size = shape[0] + if model_class_name in [ + *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), + *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), + *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), + *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), + *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), + ]: + inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) + elif model_class_name in [ + *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), + "XLNetForQuestionAnswering", + ]: + inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) + inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) + elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): + if not hasattr(model.config, "problem_type") or model.config.problem_type is None: + raise ValueError( + "Could not retrieve the problem type for the sequence classification task, please set " + 'model.config.problem_type to one of the following values: "regression", ' + '"single_label_classification", or "multi_label_classification".' + ) + + if model.config.problem_type == "regression": + labels_shape = (batch_size, model.config.num_labels) + labels_dtype = torch.float32 + elif model.config.problem_type == "single_label_classification": + labels_shape = (batch_size,) + labels_dtype = torch.long + elif model.config.problem_type == "multi_label_classification": + labels_shape = (batch_size, model.config.num_labels) + labels_dtype = torch.float32 + else: + raise ValueError( + 'Expected model.config.problem_type to be either: "regression", "single_label_classification"' + f', or "multi_label_classification", but "{model.config.problem_type}" was provided.' 
+ ) + inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) + + elif model_class_name in [ + *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), + *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), + *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), + *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), + *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), + *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), + "GPT2DoubleHeadsModel", + "PeftModelForCausalLM", + "PeftModelForSeq2SeqLM", + ]: + inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) + elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]: + inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device) + else: + raise NotImplementedError( + f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." + ) + elif "pixel_values" in input_name: + batch_size = shape[0] + image_size = getattr(model.config, "image_size", None) + if image_size is None: + if hasattr(model.config, "vision_config"): + image_size = model.config.vision_config.image_size + elif hasattr(model.config, "encoder"): + image_size = model.config.encoder.image_size + else: + image_size = (_generate_random_int(), _generate_random_int()) + + # If no num_channels is in the config, use some arbitrary value. + num_channels = getattr(model.config, "num_channels", 3) + if not isinstance(image_size, collections.abc.Iterable): + image_size = (image_size, image_size) + height, width = image_size + inputs_dict[input_name] = torch.zeros( + batch_size, num_channels, height, width, dtype=torch.float32, device=device + ) + elif "bbox" in input_name: + inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device) + elif "input_features" in input_name: + inputs_dict[input_name] = torch.zeros( + *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device + ) + elif "visual_feats" in input_name: + inputs_dict[input_name] = torch.zeros( + shape + + [ + model.config.visual_feat_dim, + ], + dtype=torch.float, + device=device, + ) + elif "visual_pos" in input_name: + inputs_dict[input_name] = torch.zeros( + shape + + [ + model.config.visual_pos_dim, + ], + dtype=torch.float, + device=device, + ) + elif "inputs" in input_name: + inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device) + elif "input_values" in input_name: + batch_size, _ = shape + # Generating big sequence length for audio inputs. + seq_length = _generate_random_int(low=10000, high=20000) + inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device) + elif "mask" in input_name or "ids" in input_name: + inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device) + else: + shape_with_hidden_size = shape + [model.config.hidden_size] + inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device) + + return inputs_dict + + def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): + rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) + + if kind == "placeholder" and target in self.meta_args: + rv.install_metadata(self.meta_args[target]) + return rv + + if target in self.orig_fns: + # NOTE: tensor constructors in PyTorch define the `device` argument as + # *kwargs-only*. That is why this works. 
If you add methods to + # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only, + # this will break and you will likely see issues where we cannot infer + # the size of the output. + if "device" in kwargs: + kwargs["device"] = "meta" + + try: + args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas) + kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas) + + if kind == "call_function": + meta_target = _MANUAL_META_OVERRIDES.get(target, target) + meta_out = meta_target(*args_metas, **kwargs_metas) + if isinstance(meta_out, torch.Tensor): + meta_out = meta_out.to(device="meta") + elif kind == "call_method": + method = getattr(args_metas[0].__class__, target) + meta_target = _MANUAL_META_OVERRIDES.get(method, method) + meta_out = meta_target(*args_metas, **kwargs_metas) + elif kind == "call_module": + if not hasattr(self, "orig_forward"): + raise AttributeError(f"{self} does not have an attribute called orig_forward") + self._disable_module_getattr = True + try: + mod = self.root.get_submodule(target) + mod_type = type(mod) + if mod_type in _MANUAL_META_OVERRIDES: + meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas) + else: + meta_out = self.orig_forward(*args_metas, **kwargs_metas) + finally: + self._disable_module_getattr = False + elif kind == "get_attr": + self._disable_module_getattr = True + try: + attr_itr = self.root + atoms = target.split(".") + for atom in atoms: + attr_itr = getattr(attr_itr, atom) + if isinstance(attr_itr, torch.Tensor): + meta_out = attr_itr.to(device="meta") + else: + meta_out = attr_itr + finally: + self._disable_module_getattr = False + else: + return rv + + if not isinstance(rv, Proxy): + raise ValueError("Don't support composite output yet") + rv.install_metadata(meta_out) + except Exception as e: + if _IS_IN_DEBUG_MODE: + warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") + + return rv + + # Replaced by .getattr from PyTorch 1.13 + def _module_getattr(self, attr, attr_val, parameter_proxy_cache): + if getattr(self, "_disable_module_getattr", False): + return attr_val + else: + + def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): + for n, p in collection_to_search: + if attr_val is p: + if n not in parameter_proxy_cache: + kwargs = {} + if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: + kwargs["proxy_factory_fn"] = ( + None + if not self.param_shapes_constant + else lambda node: ParameterProxy(self, node, n, attr_val) + ) + val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] + parameter_proxy_cache[n] = val_proxy + return parameter_proxy_cache[n] + return None + + if isinstance(attr_val, torch.nn.Parameter): + maybe_parameter_proxy = maybe_get_proxy_for_attr( + attr_val, self.root.named_parameters(), parameter_proxy_cache + ) + if maybe_parameter_proxy is not None: + return maybe_parameter_proxy + + if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): + maybe_buffer_proxy = maybe_get_proxy_for_attr( + attr_val, self.root.named_buffers(), parameter_proxy_cache + ) + if maybe_buffer_proxy is not None: + return maybe_buffer_proxy + + return attr_val + + # Needed for PyTorch 1.13+ + def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): + return self._module_getattr(attr, attr_val, parameter_proxy_cache) + + def call_module(self, m, forward, args, kwargs): + self.orig_forward = forward + return super().call_module(m, forward, args, 
kwargs)
+
+    def proxy(self, node):
+        return HFProxy(node, self)
+
+    def trace(
+        self,
+        root: Union[torch.nn.Module, Callable[..., Any]],
+        concrete_args: Optional[Dict[str, Any]] = None,
+        dummy_inputs: Optional[Dict[str, Any]] = None,
+        complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
+    ) -> Graph:
+        """
+        Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
+        `torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
+        the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
+        `torch.nn.Module` instance to use as the root and add embedded constants to.
+
+        Args:
+            root (`torch.nn.Module` or `Callable`):
+                Either a `torch.nn.Module` or a function to be traced through. If root is not a
+                [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
+            concrete_args (`Dict[str, Any]`, *optional*):
+                Concrete arguments that should not be treated as Proxies.
+            dummy_inputs (`Dict[str, Any]`, *optional*):
+                The dummy inputs needed to handle data-dependent control-flow if `root` is not a
+                [`~transformers.PreTrainedModel`]. It can also be used when `root` is a
+                [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
+            complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
+                If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
+                `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.
+
+        Returns:
+            `torch.fx.Graph`:
+                An FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
+
+        """
+        sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)
+
+        if concrete_args is None:
+            concrete_args = {}
+
+        if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
+            for param in sig.parameters.values():
+                if param.name in dummy_inputs:
+                    continue
+                if param.default is inspect.Parameter.empty:
+                    raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
+            concrete_args.update(
+                {
+                    p.name: p.default
+                    for p in sig.parameters.values()
+                    if (p.name not in dummy_inputs and p.name not in concrete_args)
+                }
+            )
+
+        input_names = sig.parameters.keys() - concrete_args.keys()
+
+        # Creating a random input shape to generate dummy inputs.
+        batch_size = _generate_random_int()
+        sequence_length = _generate_random_int()
+        shape = [batch_size, sequence_length]
+
+        if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
+            num_choices = _generate_random_int(low=2, high=5)
+            shape.insert(1, num_choices)
+
+        inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
+        for input_name in input_names:
+            if input_name in inputs:
+                continue
+            # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
+            # be able to use HFTracer._generate_dummy_input.
+            if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith(
+                ("_deserialize_graph_module", "_CodeOnlyModule")
+            ):
+                inputs.update(self._generate_dummy_input(root, input_name, shape))
+            else:
+                raise RuntimeError(
+                    f"Could not generate input named {input_name} because root is not a"
+                    " transformers.PreTrainedModel."
+                )
+
+        concrete_metas = {
+            input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_
+            for input_name, input_ in inputs.items()
+        }
+        for param in sig.parameters.values():
+            if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
+                concrete_metas[f"**{param.name}"] = {}
+        self.meta_args = concrete_metas
+        self.patched_torch_methods = {
+            target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
+        }
+        self.orig_fns = set()
+
+        for name, (wrapper, orig) in self.patched_torch_methods.items():
+            setattr(torch, name, wrapper)
+            self.orig_fns.add(orig)
+
+        try:
+            self.graph = super().trace(root, concrete_args=concrete_args)
+        finally:
+            for name, (_, orig) in self.patched_torch_methods.items():
+                setattr(torch, name, orig)
+
+        # This is necessary because concrete args are added as input to the traced module since
+        # https://github.com/pytorch/pytorch/pull/55888.
+        for node in self.graph.nodes:
+            if node.op == "placeholder":
+                # Removing default values for inputs as the forward pass will fail with them.
+                if node.target in input_names:
+                    node.args = ()
+                    # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
+                    # It cannot infer the attributes and methods the input should have, and fails.
+                    node.type = torch.Tensor
+                # It is a concrete arg so it is not used and should be removed.
+                else:
+                    to_visit = [node]
+                    to_delete = collections.OrderedDict()
+                    while to_visit:
+                        n = to_visit.pop(0)
+                        to_delete[n] = None
+                        to_visit += list(n.users.keys())
+
+                    for user in reversed(to_delete.keys()):
+                        self.graph.erase_node(user)
+
+            # TODO: solve GraphModule creation.
+            # Without this, return type annotation "Tuple" is causing code execution failure.
+            if node.op == "output":
+                node.type = None
+
+        return self.graph
+
+    def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
+        """
+        Whether the module was instantiated with Proxies. If that is the case, such a module cannot be a leaf module
+        because its attributes are input-dependent.
+        """
+        return any(isinstance(attr, Proxy) for attr in mod.__dict__.values())
+
+    def _insert_module_as_submodule(self, mod: nn.Module) -> str:
+        """
+        Helper method which tries to insert a module that was not declared as a submodule.
+        """
+        # If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
+        # It is not possible to insert such modules, those should be traced through.
+        if self._stateless_mod_instanciation_depends_on_proxies(mod):
+            return ""
+        idx = 0
+        mod_name = mod.__class__.__name__.lower()
+        path = f"{mod_name}_{idx}"
+        already_inserted = False
+        while hasattr(self.root, path):
+            if getattr(self.root, path) is mod:
+                already_inserted = True
+                break
+            # Increment before recomputing so that a fresh candidate name is tried on each iteration.
+            idx += 1
+            path = f"{mod_name}_{idx}"
+
+        # No need to add multiple instances of the same module.
+        if not already_inserted:
+            self.root.add_module(path, mod)
+        return path
+
+    def path_of_module(self, mod: nn.Module) -> str:
+        """
+        Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
+        a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
+        string "foo.bar".
+
+        Args:
+            mod (nn.Module): The `Module` to retrieve the qualified name for.
+ """ + try: + return super().path_of_module(mod) + except NameError as e: + if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0: + path = self._insert_module_as_submodule(mod) + return path + raise e + + def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: + return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module( + m, module_qualified_name + ) + + @compatibility(is_backward_compatible=True) + def keys(self, obj: "Proxy") -> Any: + """Called when a proxy object is has the keys() method called. + This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in + your custom tracer. + """ + attribute = HFAttribute(obj, "keys")() + if obj.node.target == "**kwargs": + return attribute._metadata + return attribute + + +def get_concrete_args(model: nn.Module, input_names: List[str]): + sig = inspect.signature(model.forward) + + if not (set(input_names) <= set(sig.parameters.keys())): + formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names) + formatted_allowed_input_names = ", ".join(sig.parameters.keys()) + raise ValueError( + f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:" + f" {formatted_allowed_input_names}" + ) + + return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names} + + +def check_if_model_is_supported(model: PreTrainedModel): + if model.__class__.__name__ not in _SUPPORTED_MODELS: + supported_model_names = ", ".join(_SUPPORTED_MODELS) + raise NotImplementedError( + f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}" + ) + + +def symbolic_trace( + model: PreTrainedModel, + input_names: Optional[List[str]] = None, + disable_check: bool = False, + tracer_cls: Type[HFTracer] = HFTracer, +) -> GraphModule: + """ + Performs symbolic tracing on the model. + + Args: + model ([`PretrainedModel`]): + The model to trace. + input_names (`List[str]`, *optional*): + The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. + disable_check (`bool`, *optional*, defaults to `False`): + If `True`, no check is done before trying to trace the model, this is mostly usesul for debugging purposes. + tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`): + The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead. + + Returns: + `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. + + Example: + + ```python + from transformers.utils.fx import symbolic_trace + + traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) + ``` + """ + if input_names is None: + input_names = model.dummy_inputs.keys() + + input_names = list(input_names) + concrete_args = get_concrete_args(model, input_names) + + if not disable_check: + check_if_model_is_supported(model) + + # Tracing. + tracer = tracer_cls() + traced_graph = tracer.trace(model, concrete_args=concrete_args) + traced = torch.fx.GraphModule(model, traced_graph) + + traced.config = model.config + # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus + # _generate_dummy_input, where the model class is needed. 
+    traced.class_for_deserialization = model.__class__
+    traced.device = model.device
+
+    return traced
diff --git a/modified/utils/generic.py b/modified/utils/generic.py
new file mode 100644
index 0000000000000000000000000000000000000000..34dac8bea70cfc1ac552a516aacef69b7c8d1377
--- /dev/null
+++ b/modified/utils/generic.py
@@ -0,0 +1,690 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Generic utilities
+"""
+
+import inspect
+import tempfile
+from collections import OrderedDict, UserDict
+from collections.abc import MutableMapping
+from contextlib import ExitStack, contextmanager
+from dataclasses import fields, is_dataclass
+from enum import Enum
+from typing import Any, ContextManager, Iterable, List, Tuple
+
+import numpy as np
+
+from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
+
+
+if is_flax_available():
+    import jax.numpy as jnp
+
+
+class cached_property(property):
+    """
+    Descriptor that mimics @property but caches output in member variable.
+
+    From tensorflow_datasets
+
+    Built-in in functools from Python 3.8.
+    """
+
+    def __get__(self, obj, objtype=None):
+        # See docs.python.org/3/howto/descriptor.html#properties
+        if obj is None:
+            return self
+        if self.fget is None:
+            raise AttributeError("unreadable attribute")
+        attr = "__cached_" + self.fget.__name__
+        cached = getattr(obj, attr, None)
+        if cached is None:
+            cached = self.fget(obj)
+            setattr(obj, attr, cached)
+        return cached
+
+
+# vendored from distutils.util
+def strtobool(val):
+    """Convert a string representation of truth to true (1) or false (0).
+
+    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'.
+    Raises ValueError if 'val' is anything else.
+    """
+    val = val.lower()
+    if val in {"y", "yes", "t", "true", "on", "1"}:
+        return 1
+    if val in {"n", "no", "f", "false", "off", "0"}:
+        return 0
+    raise ValueError(f"invalid truth value {val!r}")
+
+
+def infer_framework_from_repr(x):
+    """
+    Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the
+    frameworks in a smart order, without the need to import the frameworks).
+    """
+    representation = str(type(x))
+    if representation.startswith("<class 'torch."):
+        return "pt"
+    elif representation.startswith("<class 'tensorflow."):
+        return "tf"
+    elif representation.startswith("<class 'jax"):
+        return "jax"
+    elif representation.startswith("<class 'numpy."):
+        return "np"
+
+
+def _get_frameworks_and_test_func(x):
+    """
+    Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework
+    we can guess from the repr first, then Numpy, then the others.
+    """
+    framework_to_test = {
+        "pt": is_torch_tensor,
+        "tf": is_tf_tensor,
+        "jax": is_jax_tensor,
+        "np": is_numpy_array,
+    }
+    preferred_framework = infer_framework_from_repr(x)
+    # We will test this one first, then numpy, then the others.
+    frameworks = [] if preferred_framework is None else [preferred_framework]
+    if preferred_framework != "np":
+        frameworks.append("np")
+    frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, "np"]])
+    return {f: framework_to_test[f] for f in frameworks}
+
+
+def is_tensor(x):
+    """
+    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray` in the order
+    defined by `infer_framework_from_repr`.
+    """
+    # This gives us a smart order to test the frameworks with the corresponding tests.
+    framework_to_test_func = _get_frameworks_and_test_func(x)
+    for test_func in framework_to_test_func.values():
+        if test_func(x):
+            return True
+
+    # Tracers
+    if is_torch_fx_proxy(x):
+        return True
+
+    if is_flax_available():
+        from jax.core import Tracer
+
+        if isinstance(x, Tracer):
+            return True
+
+    return False
+
+
+def _is_numpy(x):
+    return isinstance(x, np.ndarray)
+
+
+def is_numpy_array(x):
+    """
+    Tests if `x` is a numpy array or not.
+    """
+    return _is_numpy(x)
+
+
+def _is_torch(x):
+    import torch
+
+    return isinstance(x, torch.Tensor)
+
+
+def is_torch_tensor(x):
+    """
+    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
+    """
+    return False if not is_torch_available() else _is_torch(x)
+
+
+def _is_tensorflow(x):
+    import tensorflow as tf
+
+    return isinstance(x, tf.Tensor)
+
+
+def is_tf_tensor(x):
+    """
+    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
+    """
+    return False if not is_tf_available() else _is_tensorflow(x)
+
+
+def _is_jax(x):
+    import jax.numpy as jnp
+
+    return isinstance(x, jnp.ndarray)
+
+
+def is_jax_tensor(x):
+    """
+    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
+    """
+    return False if not is_flax_available() else _is_jax(x)
+
+
+class ModelOutput(OrderedDict):
+    """
+    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
+    a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
+    python dictionary.
+
+    You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple
+    before.
+    """
+
+    def __init_subclass__(cls) -> None:
+        """Register subclasses as pytree nodes.
+
+        This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with
+        `static_graph=True` with modules that output `ModelOutput` subclasses.
+ """ + if is_torch_available(): + _torch_pytree._register_pytree_node( + cls, + _model_output_flatten, + _model_output_unflatten, + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Subclasses of ModelOutput must use the @dataclass decorator + # This check is done in __init__ because the @dataclass decorator operates after __init_subclass__ + # issubclass() would return True for issubclass(ModelOutput, ModelOutput) when False is needed + # Just need to check that the current class is not ModelOutput + is_modeloutput_subclass = self.__class__ != ModelOutput + + if is_modeloutput_subclass and not is_dataclass(self): + raise TypeError( + f"{self.__module__}.{self.__class__.__name__} is not a dataclasss." + " This is a subclass of ModelOutput and so must use the @dataclass decorator." + ) + + def __post_init__(self): + """Check the ModelOutput dataclass. + + Only occurs if @dataclass decorator has been used. + """ + class_fields = fields(self) + + # Safety and consistency checks + if not len(class_fields): + raise ValueError(f"{self.__class__.__name__} has no fields.") + if not all(field.default is None for field in class_fields[1:]): + raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") + + first_field = getattr(self, class_fields[0].name) + other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) + + if other_fields_are_none and not is_tensor(first_field): + if isinstance(first_field, dict): + iterator = first_field.items() + first_field_iterator = True + else: + try: + iterator = iter(first_field) + first_field_iterator = True + except TypeError: + first_field_iterator = False + + # if we provided an iterator as first field and the iterator is a (key, value) iterator + # set the associated fields + if first_field_iterator: + for idx, element in enumerate(iterator): + if ( + not isinstance(element, (list, tuple)) + or not len(element) == 2 + or not isinstance(element[0], str) + ): + if idx == 0: + # If we do not have an iterator of key/values, set it as attribute + self[class_fields[0].name] = first_field + else: + # If we have a mixed iterator, raise an error + raise ValueError( + f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." 
+                            )
+                        break
+                    setattr(self, element[0], element[1])
+                    if element[1] is not None:
+                        self[element[0]] = element[1]
+            elif first_field is not None:
+                self[class_fields[0].name] = first_field
+        else:
+            for field in class_fields:
+                v = getattr(self, field.name)
+                if v is not None:
+                    self[field.name] = v
+
+    def __delitem__(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
+
+    def setdefault(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
+
+    def pop(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
+
+    def update(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
+
+    def __getitem__(self, k):
+        if isinstance(k, str):
+            inner_dict = dict(self.items())
+            return inner_dict[k]
+        else:
+            return self.to_tuple()[k]
+
+    def __setattr__(self, name, value):
+        if name in self.keys() and value is not None:
+            # Don't call self.__setitem__ to avoid recursion errors
+            super().__setitem__(name, value)
+        super().__setattr__(name, value)
+
+    def __setitem__(self, key, value):
+        # Will raise a KeyError if needed
+        super().__setitem__(key, value)
+        # Don't call self.__setattr__ to avoid recursion errors
+        super().__setattr__(key, value)
+
+    def __reduce__(self):
+        if not is_dataclass(self):
+            return super().__reduce__()
+        callable, _args, *remaining = super().__reduce__()
+        args = tuple(getattr(self, field.name) for field in fields(self))
+        return callable, args, *remaining
+
+    def to_tuple(self) -> Tuple[Any]:
+        """
+        Convert self to a tuple containing all the attributes/keys that are not `None`.
+        """
+        return tuple(self[k] for k in self.keys())
+
+
+if is_torch_available():
+    import torch.utils._pytree as _torch_pytree
+
+    def _model_output_flatten(output: ModelOutput) -> Tuple[List[Any], "_torch_pytree.Context"]:
+        return list(output.values()), (type(output), list(output.keys()))
+
+    def _model_output_unflatten(values: Iterable[Any], context: "_torch_pytree.Context") -> ModelOutput:
+        output_type, keys = context
+        return output_type(**dict(zip(keys, values)))
+
+    _torch_pytree._register_pytree_node(
+        ModelOutput,
+        _model_output_flatten,
+        _model_output_unflatten,
+    )
+
+
+class ExplicitEnum(str, Enum):
+    """
+    Enum with more explicit error message for missing values.
+    """
+
+    @classmethod
+    def _missing_(cls, value):
+        raise ValueError(
+            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
+        )
+
+
+class PaddingStrategy(ExplicitEnum):
+    """
+    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
+    an IDE.
+    """
+
+    LONGEST = "longest"
+    MAX_LENGTH = "max_length"
+    DO_NOT_PAD = "do_not_pad"
+
+
+class TensorType(ExplicitEnum):
+    """
+    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
+    tab-completion in an IDE.
+    """
+
+    PYTORCH = "pt"
+    TENSORFLOW = "tf"
+    NUMPY = "np"
+    JAX = "jax"
+
+
+class ContextManagers:
+    """
+    Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers`
+    in the `fastcore` library.
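+
+    Example (an illustrative sketch using stdlib context managers; any context managers can be passed):
+
+    ```python
+    from contextlib import suppress
+
+    with ContextManagers([suppress(KeyError), suppress(ValueError)]):
+        {}["missing"]  # the KeyError raised here is swallowed on exit
+    ```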
+ """ + + def __init__(self, context_managers: List[ContextManager]): + self.context_managers = context_managers + self.stack = ExitStack() + + def __enter__(self): + for context_manager in self.context_managers: + self.stack.enter_context(context_manager) + + def __exit__(self, *args, **kwargs): + self.stack.__exit__(*args, **kwargs) + + +def can_return_loss(model_class): + """ + Check if a given model can return loss. + + Args: + model_class (`type`): The class of the model. + """ + framework = infer_framework(model_class) + if framework == "tf": + signature = inspect.signature(model_class.call) # TensorFlow models + elif framework == "pt": + signature = inspect.signature(model_class.forward) # PyTorch models + else: + signature = inspect.signature(model_class.__call__) # Flax models + + for p in signature.parameters: + if p == "return_loss" and signature.parameters[p].default is True: + return True + + return False + + +def find_labels(model_class): + """ + Find the labels used by a given model. + + Args: + model_class (`type`): The class of the model. + """ + model_name = model_class.__name__ + framework = infer_framework(model_class) + if framework == "tf": + signature = inspect.signature(model_class.call) # TensorFlow models + elif framework == "pt": + signature = inspect.signature(model_class.forward) # PyTorch models + else: + signature = inspect.signature(model_class.__call__) # Flax models + + if "QuestionAnswering" in model_name: + return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] + else: + return [p for p in signature.parameters if "label" in p] + + +def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."): + """Flatten a nested dict into a single level dict.""" + + def _flatten_dict(d, parent_key="", delimiter="."): + for k, v in d.items(): + key = str(parent_key) + delimiter + str(k) if parent_key else k + if v and isinstance(v, MutableMapping): + yield from flatten_dict(v, key, delimiter=delimiter).items() + else: + yield key, v + + return dict(_flatten_dict(d, parent_key, delimiter)) + + +@contextmanager +def working_or_temp_dir(working_dir, use_temp_dir: bool = False): + if use_temp_dir: + with tempfile.TemporaryDirectory() as tmp_dir: + yield tmp_dir + else: + yield working_dir + + +def transpose(array, axes=None): + """ + Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. + """ + if is_numpy_array(array): + return np.transpose(array, axes=axes) + elif is_torch_tensor(array): + return array.T if axes is None else array.permute(*axes) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.transpose(array, perm=axes) + elif is_jax_tensor(array): + return jnp.transpose(array, axes=axes) + else: + raise ValueError(f"Type not supported for transpose: {type(array)}.") + + +def reshape(array, newshape): + """ + Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. 
+ """ + if is_numpy_array(array): + return np.reshape(array, newshape) + elif is_torch_tensor(array): + return array.reshape(*newshape) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.reshape(array, newshape) + elif is_jax_tensor(array): + return jnp.reshape(array, newshape) + else: + raise ValueError(f"Type not supported for reshape: {type(array)}.") + + +def squeeze(array, axis=None): + """ + Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. + """ + if is_numpy_array(array): + return np.squeeze(array, axis=axis) + elif is_torch_tensor(array): + return array.squeeze() if axis is None else array.squeeze(dim=axis) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.squeeze(array, axis=axis) + elif is_jax_tensor(array): + return jnp.squeeze(array, axis=axis) + else: + raise ValueError(f"Type not supported for squeeze: {type(array)}.") + + +def expand_dims(array, axis): + """ + Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. + """ + if is_numpy_array(array): + return np.expand_dims(array, axis) + elif is_torch_tensor(array): + return array.unsqueeze(dim=axis) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.expand_dims(array, axis=axis) + elif is_jax_tensor(array): + return jnp.expand_dims(array, axis=axis) + else: + raise ValueError(f"Type not supported for expand_dims: {type(array)}.") + + +def tensor_size(array): + """ + Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. + """ + if is_numpy_array(array): + return np.size(array) + elif is_torch_tensor(array): + return array.numel() + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.size(array) + elif is_jax_tensor(array): + return array.size + else: + raise ValueError(f"Type not supported for expand_dims: {type(array)}.") + + +def add_model_info_to_auto_map(auto_map, repo_id): + """ + Adds the information of the repo_id to a given auto map. + """ + for key, value in auto_map.items(): + if isinstance(value, (tuple, list)): + auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] + elif value is not None and "--" not in value: + auto_map[key] = f"{repo_id}--{value}" + + return auto_map + + +def infer_framework(model_class): + """ + Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant + classes are imported or available. + """ + for base_class in inspect.getmro(model_class): + module = base_class.__module__ + name = base_class.__name__ + if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel": + return "tf" + elif module.startswith("torch") or name == "PreTrainedModel": + return "pt" + elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel": + return "flax" + else: + raise TypeError(f"Could not infer framework from class {model_class}.") diff --git a/modified/utils/hp_naming.py b/modified/utils/hp_naming.py new file mode 100644 index 0000000000000000000000000000000000000000..f7c5cb5259f8452b09cc910aee1fec7f1ba438c8 --- /dev/null +++ b/modified/utils/hp_naming.py @@ -0,0 +1,162 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import re
+
+
+class TrialShortNamer:
+    PREFIX = "hp"
+    DEFAULTS = {}
+    NAMING_INFO = None
+
+    @classmethod
+    def set_defaults(cls, prefix, defaults):
+        cls.PREFIX = prefix
+        cls.DEFAULTS = defaults
+        cls.build_naming_info()
+
+    @staticmethod
+    def shortname_for_word(info, word):
+        if len(word) == 0:
+            return ""
+        short_word = None
+        if any(char.isdigit() for char in word):
+            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
+        if word in info["short_word"]:
+            return info["short_word"][word]
+        for prefix_len in range(1, len(word) + 1):
+            prefix = word[:prefix_len]
+            if prefix in info["reverse_short_word"]:
+                continue
+            else:
+                short_word = prefix
+                break
+
+        if short_word is None:
+            # Paranoid fallback
+            def int_to_alphabetic(integer):
+                s = ""
+                while integer != 0:
+                    s = chr(ord("A") + integer % 10) + s
+                    integer //= 10
+                return s
+
+            i = 0
+            while True:
+                sword = word + "#" + int_to_alphabetic(i)
+                if sword in info["reverse_short_word"]:
+                    # Keep incrementing the suffix until an unused short name is found.
+                    i += 1
+                    continue
+                else:
+                    short_word = sword
+                    break
+
+        info["short_word"][word] = short_word
+        info["reverse_short_word"][short_word] = word
+        return short_word
+
+    @staticmethod
+    def shortname_for_key(info, param_name):
+        words = param_name.split("_")
+
+        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
+
+        # We try to create a separatorless short name, but if there is a collision we have to fall back
+        # to a separated short name
+        separators = ["", "_"]
+
+        for separator in separators:
+            shortname = separator.join(shortname_parts)
+            if shortname not in info["reverse_short_param"]:
+                info["short_param"][param_name] = shortname
+                info["reverse_short_param"][shortname] = param_name
+                return shortname
+
+        return param_name
+
+    @staticmethod
+    def add_new_param_name(info, param_name):
+        short_name = TrialShortNamer.shortname_for_key(info, param_name)
+        info["short_param"][param_name] = short_name
+        info["reverse_short_param"][short_name] = param_name
+
+    @classmethod
+    def build_naming_info(cls):
+        if cls.NAMING_INFO is not None:
+            return
+
+        info = {
+            "short_word": {},
+            "reverse_short_word": {},
+            "short_param": {},
+            "reverse_short_param": {},
+        }
+
+        field_keys = list(cls.DEFAULTS.keys())
+
+        for k in field_keys:
+            cls.add_new_param_name(info, k)
+
+        cls.NAMING_INFO = info
+
+    @classmethod
+    def shortname(cls, params):
+        cls.build_naming_info()
+        assert cls.PREFIX is not None
+        name = [copy.copy(cls.PREFIX)]
+
+        for k, v in params.items():
+            if k not in cls.DEFAULTS:
+                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
+            if v == cls.DEFAULTS[k]:
+                # The default value is not added to the name
+                continue
+
+            key = cls.NAMING_INFO["short_param"][k]
+
+            if isinstance(v, bool):
+                v = 1 if v else 0
+
+            sep = "" if isinstance(v, (int, float)) else "-"
+            e = f"{key}{sep}{v}"
+            name.append(e)
+
+        return "_".join(name)
+
+    @classmethod
+    def parse_repr(cls, repr):
+        repr = repr[len(cls.PREFIX) + 1 :]
+        if repr == "":
+            values = []
+        else:
+            values = repr.split("_")
+
+        parameters = {}
+
+        for value in values:
+            if "-" in value:
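+                # A value containing "-" is string-valued, encoded as "<short_key>-<value>". Otherwise the
+                # value is numeric and fused directly to the key (e.g. a param shortened to "lr" with value
+                # 0.1 is encoded as "lr0.1" by `shortname` above).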
p_k, p_v = value.split("-")
+            else:
+                p_k = re.sub("[0-9.]", "", value)
+                p_v = float(re.sub("[^0-9.]", "", value))
+
+            key = cls.NAMING_INFO["reverse_short_param"][p_k]
+
+            parameters[key] = p_v
+
+        for k in cls.DEFAULTS:
+            if k not in parameters:
+                parameters[k] = cls.DEFAULTS[k]
+
+        return parameters
diff --git a/modified/utils/hub.py b/modified/utils/hub.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ab1670ea37cf3c6301d49e50eece092af6f94e1
--- /dev/null
+++ b/modified/utils/hub.py
@@ -0,0 +1,1244 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Hub utilities: utilities related to download and cache models
+"""
+import json
+import os
+import re
+import shutil
+import sys
+import tempfile
+import traceback
+import warnings
+from concurrent import futures
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple, Union
+from urllib.parse import urlparse
+from uuid import uuid4
+
+import huggingface_hub
+import requests
+from huggingface_hub import (
+    _CACHED_NO_EXIST,
+    CommitOperationAdd,
+    constants,
+    create_branch,
+    create_commit,
+    create_repo,
+    get_hf_file_metadata,
+    hf_hub_download,
+    hf_hub_url,
+    try_to_load_from_cache,
+)
+from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get
+from huggingface_hub.utils import (
+    EntryNotFoundError,
+    GatedRepoError,
+    HFValidationError,
+    LocalEntryNotFoundError,
+    RepositoryNotFoundError,
+    RevisionNotFoundError,
+    build_hf_headers,
+    hf_raise_for_status,
+    send_telemetry,
+)
+from huggingface_hub.utils._deprecation import _deprecate_method
+from requests.exceptions import HTTPError
+
+from . import __version__, logging
+from .generic import working_or_temp_dir
+from .import_utils import (
+    ENV_VARS_TRUE_VALUES,
+    _tf_version,
+    _torch_version,
+    is_tf_available,
+    is_torch_available,
+    is_training_run_on_sagemaker,
+)
+from .logging import tqdm
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+_is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False
+
+
+def is_offline_mode():
+    return _is_offline_mode
+
+
+torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
+default_cache_path = constants.default_cache_path
+old_default_cache_path = os.path.join(torch_cache_home, "transformers")
+
+# Determine default cache directory. Lots of legacy environment variables to ensure backward compatibility.
+# The best way to set the cache path is with the environment variable HF_HOME. For more details, check out this
+# documentation page: https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables.
+#
+# In code, use `HF_HUB_CACHE` as the default cache path. This variable is set by the library and is guaranteed
+# to be set to the right value.
+#
+# TODO: clean this for v5?
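+#
+# The chain of `os.getenv` calls below resolves the cache path from the first variable that is set:
+# TRANSFORMERS_CACHE, then PYTORCH_TRANSFORMERS_CACHE, then PYTORCH_PRETRAINED_BERT_CACHE, and finally the
+# huggingface_hub default (constants.HF_HUB_CACHE).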
+PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", constants.HF_HUB_CACHE) +PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) +TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) + +# Onetime move from the old location to the new one if no ENV variable has been set. +if ( + os.path.isdir(old_default_cache_path) + and not os.path.isdir(constants.HF_HUB_CACHE) + and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ + and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ + and "TRANSFORMERS_CACHE" not in os.environ +): + logger.warning( + "In Transformers v4.22.0, the default path to cache downloaded models changed from" + " '~/.cache/torch/transformers' to '~/.cache/huggingface/hub'. Since you don't seem to have" + " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" + " '~/.cache/huggingface/hub' to avoid redownloading models you have already in the cache. You should" + " only see this message once." + ) + shutil.move(old_default_cache_path, constants.HF_HUB_CACHE) + +HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(constants.HF_HOME, "modules")) +TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules" +SESSION_ID = uuid4().hex +DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", constants.HF_HUB_DISABLE_TELEMETRY) in ENV_VARS_TRUE_VALUES + +# Add deprecation warning for old environment variables. +for key in ("PYTORCH_PRETRAINED_BERT_CACHE", "PYTORCH_TRANSFORMERS_CACHE", "TRANSFORMERS_CACHE"): + if os.getenv(key) is not None: + warnings.warn( + f"Using `{key}` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.", + FutureWarning, + ) +if os.getenv("DISABLE_TELEMETRY") is not None: + warnings.warn( + "Using `DISABLE_TELEMETRY` is deprecated and will be removed in v5 of Transformers. Use `HF_HUB_DISABLE_TELEMETRY` instead.", + FutureWarning, + ) + + +S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" +CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" + +_staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES +_default_endpoint = "https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co" + +HUGGINGFACE_CO_RESOLVE_ENDPOINT = _default_endpoint +if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: + warnings.warn( + "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " + "Transformers v5. Use `HF_ENDPOINT` instead.", + FutureWarning, + ) + HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) +HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", HUGGINGFACE_CO_RESOLVE_ENDPOINT) +HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}" +HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" + + +def is_remote_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + + +# TODO: remove this once fully deprecated +# TODO? remove from './examples/research_projects/lxmert/utils.py' as well +# TODO? 
remove from './examples/research_projects/visual_bert/utils.py' as well
+@_deprecate_method(version="4.39.0", message="This method is outdated and does not support the new cache system.")
+def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:
+    """
+    Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url,
+    etag, size_MB)`. Filenames in `cache_dir` are used to get the metadata for each model, only urls ending with
+    *.bin* are added.
+
+    Args:
+        cache_dir (`Union[str, Path]`, *optional*):
+            The cache directory to search for models within. Will default to the transformers cache if unset.
+
+    Returns:
+        List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)`
+    """
+    if cache_dir is None:
+        cache_dir = TRANSFORMERS_CACHE
+    elif isinstance(cache_dir, Path):
+        cache_dir = str(cache_dir)
+    if not os.path.isdir(cache_dir):
+        return []
+
+    cached_models = []
+    for file in os.listdir(cache_dir):
+        if file.endswith(".json"):
+            meta_path = os.path.join(cache_dir, file)
+            with open(meta_path, encoding="utf-8") as meta_file:
+                metadata = json.load(meta_file)
+            url = metadata["url"]
+            etag = metadata["etag"]
+            if url.endswith(".bin"):
+                # Drop the ".json" suffix to get the path of the cached weights file
+                # (str.strip would remove characters from both ends, not the suffix).
+                size_MB = os.path.getsize(meta_path[: -len(".json")]) / 1e6
+                cached_models.append((url, etag, size_MB))
+
+    return cached_models
+
+
+def define_sagemaker_information():
+    try:
+        instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json()
+        dlc_container_used = instance_data["Image"]
+        dlc_tag = instance_data["Image"].split(":")[1]
+    except Exception:
+        dlc_container_used = None
+        dlc_tag = None
+
+    sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
+    runs_distributed_training = True if "sagemaker_distributed_dataparallel_enabled" in sagemaker_params else False
+    account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None
+
+    sagemaker_object = {
+        "sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None),
+        "sm_region": os.getenv("AWS_REGION", None),
+        "sm_number_gpu": os.getenv("SM_NUM_GPUS", 0),
+        "sm_number_cpu": os.getenv("SM_NUM_CPUS", 0),
+        "sm_distributed_training": runs_distributed_training,
+        "sm_deep_learning_container": dlc_container_used,
+        "sm_deep_learning_container_tag": dlc_tag,
+        "sm_account_id": account_id,
+    }
+    return sagemaker_object
+
+
+def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
+    """
+    Formats a user-agent string with basic info about a request.
+    """
+    ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
+    if is_torch_available():
+        ua += f"; torch/{_torch_version}"
+    if is_tf_available():
+        ua += f"; tensorflow/{_tf_version}"
+    if DISABLE_TELEMETRY:
+        return ua + "; telemetry/off"
+    if is_training_run_on_sagemaker():
+        ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items())
+    # CI will set this value to True
+    if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
+        ua += "; is_ci/true"
+    if isinstance(user_agent, dict):
+        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
+    elif isinstance(user_agent, str):
+        ua += "; " + user_agent
+    return ua
+
+
+def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]) -> Optional[str]:
+    """
+    Extracts the commit hash from a resolved filename toward a cache file.
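+
+    Example (a sketch with a made-up cache path; only the `snapshots/<commit>/` segment matters):
+
+    ```python
+    path = ".../models--gpt2/snapshots/0a1b2c3d4e5f6a7b8c9d0a1b2c3d4e5f6a7b8c9d/config.json"
+    extract_commit_hash(path, None)  # returns the 40-character hash above
+    ```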
+ """ + if resolved_file is None or commit_hash is not None: + return commit_hash + resolved_file = str(Path(resolved_file).as_posix()) + search = re.search(r"snapshots/([^/]+)/", resolved_file) + if search is None: + return None + commit_hash = search.groups()[0] + return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None + + +def cached_file( + path_or_repo_id: Union[str, os.PathLike], + filename: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + subfolder: str = "", + repo_type: Optional[str] = None, + user_agent: Optional[Union[str, Dict[str, str]]] = None, + _raise_exceptions_for_missing_entries: bool = True, + _raise_exceptions_for_connection_errors: bool = True, + _commit_hash: Optional[str] = None, + **deprecated_kwargs, +) -> Optional[str]: + """ + Tries to locate a file in a local folder and repo, downloads and cache it if necessary. + + Args: + path_or_repo_id (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a model repo on huggingface.co. + - a path to a *directory* potentially containing the file. + filename (`str`): + The name of the file to locate in `path_or_repo`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + repo_type (`str`, *optional*): + Specify the repo type (useful when downloading from a space for instance). + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). + + Examples: + + ```python + # Download a model weight from the Hub and cache it. 
+    model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin")
+    ```
+    """
+    use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
+    if use_auth_token is not None:
+        warnings.warn(
+            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+            FutureWarning,
+        )
+        if token is not None:
+            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+        token = use_auth_token
+
+    # Private arguments
+    #     _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return
+    #         None.
+    #     _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return
+    #         None.
+    #     _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or
+    #         a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache.
+    if is_offline_mode() and not local_files_only:
+        logger.info("Offline mode: forcing local_files_only=True")
+        local_files_only = True
+    if subfolder is None:
+        subfolder = ""
+
+    path_or_repo_id = str(path_or_repo_id)
+    full_filename = os.path.join(subfolder, filename)
+    if os.path.isdir(path_or_repo_id):
+        resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename)
+        if not os.path.isfile(resolved_file):
+            if _raise_exceptions_for_missing_entries:
+                raise EnvironmentError(
+                    f"{path_or_repo_id} does not appear to have a file named {full_filename}. Check out "
+                    f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files."
+                )
+            else:
+                return None
+        return resolved_file
+
+    if cache_dir is None:
+        cache_dir = TRANSFORMERS_CACHE
+    if isinstance(cache_dir, Path):
+        cache_dir = str(cache_dir)
+
+    if _commit_hash is not None and not force_download:
+        # If the file is cached under that commit hash, we return it directly.
+        resolved_file = try_to_load_from_cache(
+            path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type
+        )
+        if resolved_file is not None:
+            if resolved_file is not _CACHED_NO_EXIST:
+                return resolved_file
+            elif not _raise_exceptions_for_missing_entries:
+                return None
+            else:
+                raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.")
+
+    user_agent = http_user_agent(user_agent)
+    try:
+        # Load from URL or cache if already cached
+        resolved_file = hf_hub_download(
+            path_or_repo_id,
+            filename,
+            subfolder=None if len(subfolder) == 0 else subfolder,
+            repo_type=repo_type,
+            revision=revision,
+            cache_dir=cache_dir,
+            user_agent=user_agent,
+            force_download=force_download,
+            proxies=proxies,
+            resume_download=resume_download,
+            token=token,
+            local_files_only=local_files_only,
+        )
+    except GatedRepoError as e:
+        raise EnvironmentError(
+            "You are trying to access a gated repo.\nMake sure to request access at "
+            f"https://huggingface.co/{path_or_repo_id} and pass a token having permission to this repo either "
+            "by logging in with `huggingface-cli login` or by passing `token=<your_token>`."
+        ) from e
+    except RepositoryNotFoundError as e:
+        raise EnvironmentError(
+            f"{path_or_repo_id} is not a local folder and is not a valid model identifier "
+            "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token "
+            "having permission to this repo either by logging in with `huggingface-cli login` or by passing "
+            "`token=<your_token>`"
+        ) from e
+    except RevisionNotFoundError as e:
+        raise EnvironmentError(
+            f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
+            "for this model name. Check the model page at "
+            f"'https://huggingface.co/{path_or_repo_id}' for available revisions."
+        ) from e
+    except LocalEntryNotFoundError as e:
+        # We try to see if we have a cached version (not up to date):
+        resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
+        if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:
+            return resolved_file
+        if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors:
+            return None
+        raise EnvironmentError(
+            f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the"
+            f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named"
+            f" {full_filename}.\nCheck your internet connection or see how to run the library in offline mode at"
+            " 'https://huggingface.co/docs/transformers/installation#offline-mode'."
+        ) from e
+    except EntryNotFoundError as e:
+        if not _raise_exceptions_for_missing_entries:
+            return None
+        if revision is None:
+            revision = "main"
+        raise EnvironmentError(
+            f"{path_or_repo_id} does not appear to have a file named {full_filename}. Check out "
+            f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files."
+        ) from e
+    except HTTPError as err:
+        # First we try to see if we have a cached version (not up to date):
+        resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
+        if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:
+            return resolved_file
+        if not _raise_exceptions_for_connection_errors:
+            return None
+
+        raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}")
+    except HFValidationError as e:
+        raise EnvironmentError(
+            f"Incorrect path_or_model_id: '{path_or_repo_id}'. Please provide either the path to a local folder or the repo_id of a model on the Hub."
+        ) from e
+    return resolved_file
+
+
+# TODO: deprecate `get_file_from_repo` or document it differently?
+# Docstring is exactly the same as `cached_file` but the behavior is slightly different. If a file is missing or if
+# there is a connection error, `cached_file` will raise an error while `get_file_from_repo` will return None.
+# IMO we should keep only 1 method and have a single `raise_error` argument (to be discussed).
+def get_file_from_repo(
+    path_or_repo: Union[str, os.PathLike],
+    filename: str,
+    cache_dir: Optional[Union[str, os.PathLike]] = None,
+    force_download: bool = False,
+    resume_download: bool = False,
+    proxies: Optional[Dict[str, str]] = None,
+    token: Optional[Union[bool, str]] = None,
+    revision: Optional[str] = None,
+    local_files_only: bool = False,
+    subfolder: str = "",
+    **deprecated_kwargs,
+):
+    """
+    Tries to locate a file in a local folder and repo, downloads and caches it if necessary.
+ + Args: + path_or_repo (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a model repo on huggingface.co. + - a path to a *directory* potentially containing the file. + filename (`str`): + The name of the file to locate in `path_or_repo`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the + file does not exist. + + Examples: + + ```python + # Download a tokenizer configuration from huggingface.co and cache. + tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") + # This model does not have a tokenizer config so the result will be None. + tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") + ``` + """ + use_auth_token = deprecated_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + return cached_file( + path_or_repo_id=path_or_repo, + filename=filename, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + + +def download_url(url, proxies=None): + """ + Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is + for deprecated behavior allowing to download config/models with a single url instead of using the Hub. 
+
+    Args:
+        url (`str`): The url of the file to download.
+        proxies (`Dict[str, str]`, *optional*):
+            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+
+    Returns:
+        `str`: The location of the temporary file where the url was downloaded.
+    """
+    warnings.warn(
+        f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in"
+        " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note"
+        " that this is not compatible with the caching system (your file will be downloaded at each execution) or"
+        " multiple processes (each process will download the file in a different temporary file).",
+        FutureWarning,
+    )
+    tmp_fd, tmp_file = tempfile.mkstemp()
+    with os.fdopen(tmp_fd, "wb") as f:
+        http_get(url, f, proxies=proxies)
+    return tmp_file
+
+
+def has_file(
+    path_or_repo: Union[str, os.PathLike],
+    filename: str,
+    revision: Optional[str] = None,
+    proxies: Optional[Dict[str, str]] = None,
+    token: Optional[Union[bool, str]] = None,
+    **deprecated_kwargs,
+):
+    """
+    Checks if a repo contains a given file without downloading it. Works for remote repos and local folders.
+
+    This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist
+    for this repo, but will return False for regular connection errors.
+    """
+    use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
+    if use_auth_token is not None:
+        warnings.warn(
+            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+            FutureWarning,
+        )
+        if token is not None:
+            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+        token = use_auth_token
+
+    if os.path.isdir(path_or_repo):
+        return os.path.isfile(os.path.join(path_or_repo, filename))
+
+    url = hf_hub_url(path_or_repo, filename=filename, revision=revision)
+    headers = build_hf_headers(token=token, user_agent=http_user_agent())
+
+    r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10)
+    try:
+        hf_raise_for_status(r)
+        return True
+    except GatedRepoError as e:
+        logger.error(e)
+        raise EnvironmentError(
+            f"{path_or_repo} is a gated repository. Make sure to request access at "
+            f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by "
+            "logging in with `huggingface-cli login` or by passing `token=<your_token>`."
+        ) from e
+    except RepositoryNotFoundError as e:
+        logger.error(e)
+        raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.")
+    except RevisionNotFoundError as e:
+        logger.error(e)
+        raise EnvironmentError(
+            f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
+            f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
+        )
+    except requests.HTTPError:
+        # We return False for EntryNotFoundError (logical) as well as any connection error.
+        return False
+
+
+class PushToHubMixin:
+    """
+    A Mixin containing the functionality to push a model or tokenizer to the hub.
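+
+    Example (illustrative; `model` stands for any instance of a class inheriting this mixin):
+
+    ```python
+    model.push_to_hub("my-username/my-finetuned-bert", commit_message="Upload model")
+    ```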
+ """ + + def _create_repo( + self, + repo_id: str, + private: Optional[bool] = None, + token: Optional[Union[bool, str]] = None, + repo_url: Optional[str] = None, + organization: Optional[str] = None, + ) -> str: + """ + Create the repo if needed, cleans up repo_id with deprecated kwargs `repo_url` and `organization`, retrieves + the token. + """ + if repo_url is not None: + warnings.warn( + "The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` " + "instead." + ) + if repo_id is not None: + raise ValueError( + "`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`." + ) + repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "") + if organization is not None: + warnings.warn( + "The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your " + "organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)." + ) + if not repo_id.startswith(organization): + if "/" in repo_id: + repo_id = repo_id.split("/")[-1] + repo_id = f"{organization}/{repo_id}" + + url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True) + return url.repo_id + + def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]): + """ + Returns the list of files with their last modification timestamp. + """ + return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)} + + def _upload_modified_files( + self, + working_dir: Union[str, os.PathLike], + repo_id: str, + files_timestamps: Dict[str, float], + commit_message: Optional[str] = None, + token: Optional[Union[bool, str]] = None, + create_pr: bool = False, + revision: str = None, + commit_description: str = None, + ): + """ + Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`. 
+        """
+        if commit_message is None:
+            if "Model" in self.__class__.__name__:
+                commit_message = "Upload model"
+            elif "Config" in self.__class__.__name__:
+                commit_message = "Upload config"
+            elif "Tokenizer" in self.__class__.__name__:
+                commit_message = "Upload tokenizer"
+            elif "FeatureExtractor" in self.__class__.__name__:
+                commit_message = "Upload feature extractor"
+            elif "Processor" in self.__class__.__name__:
+                commit_message = "Upload processor"
+            else:
+                commit_message = f"Upload {self.__class__.__name__}"
+        modified_files = [
+            f
+            for f in os.listdir(working_dir)
+            if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f]
+        ]
+
+        # filter for actual files + folders at the root level
+        modified_files = [
+            f
+            for f in modified_files
+            if os.path.isfile(os.path.join(working_dir, f)) or os.path.isdir(os.path.join(working_dir, f))
+        ]
+
+        operations = []
+        # upload standalone files
+        for file in modified_files:
+            if os.path.isdir(os.path.join(working_dir, file)):
+                # go over individual files of folder
+                for f in os.listdir(os.path.join(working_dir, file)):
+                    operations.append(
+                        CommitOperationAdd(
+                            path_or_fileobj=os.path.join(working_dir, file, f), path_in_repo=os.path.join(file, f)
+                        )
+                    )
+            else:
+                operations.append(
+                    CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file)
+                )
+
+        if revision is not None:
+            create_branch(repo_id=repo_id, branch=revision, token=token, exist_ok=True)
+
+        logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}")
+        return create_commit(
+            repo_id=repo_id,
+            operations=operations,
+            commit_message=commit_message,
+            commit_description=commit_description,
+            token=token,
+            create_pr=create_pr,
+            revision=revision,
+        )
+
+    def push_to_hub(
+        self,
+        repo_id: str,
+        use_temp_dir: Optional[bool] = None,
+        commit_message: Optional[str] = None,
+        private: Optional[bool] = None,
+        token: Optional[Union[bool, str]] = None,
+        max_shard_size: Optional[Union[int, str]] = "5GB",
+        create_pr: bool = False,
+        safe_serialization: bool = True,
+        revision: str = None,
+        commit_description: str = None,
+        **deprecated_kwargs,
+    ) -> str:
+        """
+        Upload the {object_files} to the 🤗 Model Hub.
+
+        Parameters:
+            repo_id (`str`):
+                The name of the repository you want to push your {object} to. It should contain your organization name
+                when pushing to a given organization.
+            use_temp_dir (`bool`, *optional*):
+                Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub.
+                Will default to `True` if there is no directory named like `repo_id`, `False` otherwise.
+            commit_message (`str`, *optional*):
+                Message to commit while pushing. Will default to `"Upload {object}"`.
+            private (`bool`, *optional*):
+                Whether or not the repository created should be private.
+            token (`bool` or `str`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+                when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
+                is not specified.
+            max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`):
+                Only applicable for models. The maximum size for a checkpoint before being sharded. Each checkpoint
+                shard will then be smaller than this size. If expressed as a string, it needs to be digits followed
+                by a unit (like `"5MB"`).
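For instance, `max_shard_size="200MB"` would split a 1GB checkpoint into five shards of at most 200MB each.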
We default it to `"5GB"` so that users can easily load models on free-tier
+                Google Colab instances without any CPU OOM issues.
+            create_pr (`bool`, *optional*, defaults to `False`):
+                Whether or not to create a PR with the uploaded files or directly commit.
+            safe_serialization (`bool`, *optional*, defaults to `True`):
+                Whether or not to convert the model weights in safetensors format for safer serialization.
+            revision (`str`, *optional*):
+                Branch to push the uploaded files to.
+            commit_description (`str`, *optional*):
+                The description of the commit that will be created.
+
+        Examples:
+
+        ```python
+        from transformers import {object_class}
+
+        {object} = {object_class}.from_pretrained("bert-base-cased")
+
+        # Push the {object} to your namespace with the name "my-finetuned-bert".
+        {object}.push_to_hub("my-finetuned-bert")
+
+        # Push the {object} to an organization with the name "my-finetuned-bert".
+        {object}.push_to_hub("huggingface/my-finetuned-bert")
+        ```
+        """
+        use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
+        if use_auth_token is not None:
+            warnings.warn(
+                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+                FutureWarning,
+            )
+            if token is not None:
+                raise ValueError(
+                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+                )
+            token = use_auth_token
+
+        repo_path_or_name = deprecated_kwargs.pop("repo_path_or_name", None)
+        if repo_path_or_name is not None:
+            # Should use `repo_id` instead of `repo_path_or_name`. When using `repo_path_or_name`, we try to infer
+            # repo_id from the folder path, if it exists.
+            warnings.warn(
+                "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use "
+                "`repo_id` instead.",
+                FutureWarning,
+            )
+            if repo_id is not None:
+                raise ValueError(
+                    "`repo_id` and `repo_path_or_name` are both specified. Please set only the argument `repo_id`."
+                )
+            if os.path.isdir(repo_path_or_name):
+                # repo_path: infer repo_id from the path
+                repo_id = repo_path_or_name.split(os.path.sep)[-1]
+                working_dir = repo_id
+            else:
+                # repo_name: use it as repo_id
+                repo_id = repo_path_or_name
+                working_dir = repo_id.split("/")[-1]
+        else:
+            # Repo_id is passed correctly: infer working_dir from it
+            working_dir = repo_id.split("/")[-1]
+
+        # Deprecation warning will be sent after for repo_url and organization
+        repo_url = deprecated_kwargs.pop("repo_url", None)
+        organization = deprecated_kwargs.pop("organization", None)
+
+        repo_id = self._create_repo(
+            repo_id, private=private, token=token, repo_url=repo_url, organization=organization
+        )
+
+        if use_temp_dir is None:
+            use_temp_dir = not os.path.isdir(working_dir)
+
+        with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir:
+            files_timestamps = self._get_files_timestamps(work_dir)
+
+            # Save all files.
+            self.save_pretrained(work_dir, max_shard_size=max_shard_size, safe_serialization=safe_serialization)
+
+            return self._upload_modified_files(
+                work_dir,
+                repo_id,
+                files_timestamps,
+                commit_message=commit_message,
+                token=token,
+                create_pr=create_pr,
+                revision=revision,
+                commit_description=commit_description,
+            )
+
+
+def send_example_telemetry(example_name, *example_args, framework="pytorch"):
+    """
+    Sends telemetry that helps track example usage.
+
+    Args:
+        example_name (`str`): The name of the example.
+        *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script.
This function will only + try to extract the model and dataset name from those. Nothing else is tracked. + framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example. + """ + if is_offline_mode(): + return + + data = {"example": example_name, "framework": framework} + for args in example_args: + args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None} + if "model_name_or_path" in args_as_dict: + model_name = args_as_dict["model_name_or_path"] + # Filter out local paths + if not os.path.isdir(model_name): + data["model_name"] = args_as_dict["model_name_or_path"] + if "dataset_name" in args_as_dict: + data["dataset_name"] = args_as_dict["dataset_name"] + elif "task_name" in args_as_dict: + # Extract script name from the example_name + script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "") + script_name = script_name.replace("_no_trainer", "") + data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}" + + # Send telemetry in the background + send_telemetry( + topic="examples", library_name="transformers", library_version=__version__, user_agent=http_user_agent(data) + ) + + +def convert_file_size_to_int(size: Union[int, str]): + """ + Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). + + Args: + size (`int` or `str`): The size to convert. Will be directly returned if an `int`. + + Example: + ```py + >>> convert_file_size_to_int("1MiB") + 1048576 + ``` + """ + if isinstance(size, int): + return size + if size.upper().endswith("GIB"): + return int(size[:-3]) * (2**30) + if size.upper().endswith("MIB"): + return int(size[:-3]) * (2**20) + if size.upper().endswith("KIB"): + return int(size[:-3]) * (2**10) + if size.upper().endswith("GB"): + int_size = int(size[:-2]) * (10**9) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("MB"): + int_size = int(size[:-2]) * (10**6) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("KB"): + int_size = int(size[:-2]) * (10**3) + return int_size // 8 if size.endswith("b") else int_size + raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") + + +def get_checkpoint_shard_files( + pretrained_model_name_or_path, + index_filename, + cache_dir=None, + force_download=False, + proxies=None, + resume_download=False, + local_files_only=False, + token=None, + user_agent=None, + revision=None, + subfolder="", + _commit_hash=None, + **deprecated_kwargs, +): + """ + For a given model: + + - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the + Hub + - returns the list of paths to all the shards, as well as some metadata. + + For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the + index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). + """ + import json + + use_auth_token = deprecated_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.") + token = use_auth_token + + if not os.path.isfile(index_filename): + raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") + + with open(index_filename, "r") as f: + index = json.loads(f.read()) + + shard_filenames = sorted(set(index["weight_map"].values())) + sharded_metadata = index["metadata"] + sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) + sharded_metadata["weight_map"] = index["weight_map"].copy() + + # First, let's deal with local folder. + if os.path.isdir(pretrained_model_name_or_path): + shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames] + return shard_filenames, sharded_metadata + + # At this stage pretrained_model_name_or_path is a model identifier on the Hub + cached_filenames = [] + # Check if the model is already cached or not. We only try the last checkpoint, this should cover most cases of + # downloaded (if interrupted). + last_shard = try_to_load_from_cache( + pretrained_model_name_or_path, shard_filenames[-1], cache_dir=cache_dir, revision=_commit_hash + ) + show_progress_bar = last_shard is None or force_download + for shard_filename in tqdm(shard_filenames, desc="Downloading shards", disable=not show_progress_bar): + try: + # Load from URL + cached_filename = cached_file( + pretrained_model_name_or_path, + shard_filename, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _commit_hash=_commit_hash, + ) + # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so + # we don't have to catch them here. + except EntryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {shard_filename} which is " + "required according to the checkpoint index." + ) + except HTTPError: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {shard_filename}. You should try" + " again after checking your internet connection." + ) + + cached_filenames.append(cached_filename) + + return cached_filenames, sharded_metadata + + +# All what is below is for conversion between old cache format and new cache format. + + +def get_all_cached_files(cache_dir=None): + """ + Returns a list for all files cached with appropriate metadata. + """ + if cache_dir is None: + cache_dir = TRANSFORMERS_CACHE + else: + cache_dir = str(cache_dir) + if not os.path.isdir(cache_dir): + return [] + + cached_files = [] + for file in os.listdir(cache_dir): + meta_path = os.path.join(cache_dir, f"{file}.json") + if not os.path.isfile(meta_path): + continue + + with open(meta_path, encoding="utf-8") as meta_file: + metadata = json.load(meta_file) + url = metadata["url"] + etag = metadata["etag"].replace('"', "") + cached_files.append({"file": file, "url": url, "etag": etag}) + + return cached_files + + +def extract_info_from_url(url): + """ + Extract repo_name, revision and filename from an url. 
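+
+    Example (illustrating the `models--*` cache naming this maps to):
+
+    ```py
+    >>> extract_info_from_url("https://huggingface.co/bert-base-uncased/resolve/main/config.json")
+    {'repo': 'models--bert-base-uncased', 'revision': 'main', 'filename': 'config.json'}
+    ```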
+ """ + search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url) + if search is None: + return None + repo, revision, filename = search.groups() + cache_repo = "--".join(["models"] + repo.split("/")) + return {"repo": cache_repo, "revision": revision, "filename": filename} + + +def clean_files_for(file): + """ + Remove, if they exist, file, file.json and file.lock + """ + for f in [file, f"{file}.json", f"{file}.lock"]: + if os.path.isfile(f): + os.remove(f) + + +def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): + """ + Move file to repo following the new huggingface hub cache organization. + """ + os.makedirs(repo, exist_ok=True) + + # refs + os.makedirs(os.path.join(repo, "refs"), exist_ok=True) + if revision != commit_hash: + ref_path = os.path.join(repo, "refs", revision) + with open(ref_path, "w") as f: + f.write(commit_hash) + + # blobs + os.makedirs(os.path.join(repo, "blobs"), exist_ok=True) + blob_path = os.path.join(repo, "blobs", etag) + shutil.move(file, blob_path) + + # snapshots + os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True) + os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True) + pointer_path = os.path.join(repo, "snapshots", commit_hash, filename) + huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path) + clean_files_for(file) + + +def move_cache(cache_dir=None, new_cache_dir=None, token=None): + if new_cache_dir is None: + new_cache_dir = TRANSFORMERS_CACHE + if cache_dir is None: + # Migrate from old cache in .cache/huggingface/transformers + old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers" + if os.path.isdir(str(old_cache)): + cache_dir = str(old_cache) + else: + cache_dir = new_cache_dir + cached_files = get_all_cached_files(cache_dir=cache_dir) + logger.info(f"Moving {len(cached_files)} files to the new cache system") + + hub_metadata = {} + for file_info in tqdm(cached_files): + url = file_info.pop("url") + if url not in hub_metadata: + try: + hub_metadata[url] = get_hf_file_metadata(url, token=token) + except requests.HTTPError: + continue + + etag, commit_hash = hub_metadata[url].etag, hub_metadata[url].commit_hash + if etag is None or commit_hash is None: + continue + + if file_info["etag"] != etag: + # Cached file is not up to date, we just throw it as a new version will be downloaded anyway. + clean_files_for(os.path.join(cache_dir, file_info["file"])) + continue + + url_info = extract_info_from_url(url) + if url_info is None: + # Not a file from huggingface.co + continue + + repo = os.path.join(new_cache_dir, url_info["repo"]) + move_to_new_cache( + file=os.path.join(cache_dir, file_info["file"]), + repo=repo, + filename=url_info["filename"], + revision=url_info["revision"], + etag=etag, + commit_hash=commit_hash, + ) + + +class PushInProgress: + """ + Internal class to keep track of a push in progress (which might contain multiple `Future` jobs). 
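+
+    `jobs` is expected to be a list of `concurrent.futures.Future` objects; `is_done`, `wait_until_done` and
+    `cancel` simply fan out over that list.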
+ """ + + def __init__(self, jobs: Optional[futures.Future] = None) -> None: + self.jobs = [] if jobs is None else jobs + + def is_done(self): + return all(job.done() for job in self.jobs) + + def wait_until_done(self): + futures.wait(self.jobs) + + def cancel(self) -> None: + self.jobs = [ + job + for job in self.jobs + # Cancel the job if it wasn't started yet and remove cancelled/done jobs from the list + if not (job.cancel() or job.done()) + ] + + +cache_version_file = os.path.join(TRANSFORMERS_CACHE, "version.txt") +if not os.path.isfile(cache_version_file): + cache_version = 0 +else: + with open(cache_version_file) as f: + try: + cache_version = int(f.read()) + except ValueError: + cache_version = 0 + +cache_is_not_empty = os.path.isdir(TRANSFORMERS_CACHE) and len(os.listdir(TRANSFORMERS_CACHE)) > 0 + +if cache_version < 1 and cache_is_not_empty: + if is_offline_mode(): + logger.warning( + "You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local " + "cache seems to be the one of a previous version. It is very likely that all your calls to any " + "`from_pretrained()` method will fail. Remove the offline mode and enable internet connection to have " + "your cache be updated automatically, then you can go back to offline mode." + ) + else: + logger.warning( + "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a " + "one-time only operation. You can interrupt this and resume the migration later on by calling " + "`transformers.utils.move_cache()`." + ) + try: + if TRANSFORMERS_CACHE != constants.HF_HUB_CACHE: + # Users set some env variable to customize cache storage + move_cache(TRANSFORMERS_CACHE, TRANSFORMERS_CACHE) + else: + move_cache() + except Exception as e: + trace = "\n".join(traceback.format_tb(e.__traceback__)) + logger.error( + f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " + "file an issue at https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole " + "message and we will do our best to help." + ) + +if cache_version < 1: + try: + os.makedirs(TRANSFORMERS_CACHE, exist_ok=True) + with open(cache_version_file, "w") as f: + f.write("1") + except Exception: + logger.warning( + f"There was a problem when trying to write in your cache folder ({TRANSFORMERS_CACHE}). You should set " + "the environment variable TRANSFORMERS_CACHE to a writable directory." + ) diff --git a/modified/utils/import_utils.py b/modified/utils/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7530e84f4b158977a345cb8c692e58a6a94fce --- /dev/null +++ b/modified/utils/import_utils.py @@ -0,0 +1,1413 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Import utilities: Utilities related to imports and our lazy inits. 
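+
+The helpers below fall into three groups: availability flags computed once at import time, `is_xxx_available()`
+accessors over those flags, and the error-message templates consumed by `requires_backends`.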
+"""
+
+import importlib.metadata
+import importlib.util
+import json
+import os
+import shutil
+import subprocess
+import sys
+import warnings
+from collections import OrderedDict
+from functools import lru_cache, wraps
+from itertools import chain
+from types import ModuleType
+from typing import Any, Tuple, Union
+
+from packaging import version
+
+from . import logging
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+# TODO: This doesn't work for all packages (`bs4`, `faiss`, etc.) Talk to Sylvain to see how to handle it better.
+def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:
+    # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
+    package_exists = importlib.util.find_spec(pkg_name) is not None
+    package_version = "N/A"
+    if package_exists:
+        try:
+            package_version = importlib.metadata.version(pkg_name)
+            package_exists = True
+        except importlib.metadata.PackageNotFoundError:
+            package_exists = False
+        logger.debug(f"Detected {pkg_name} version {package_version}")
+    if return_version:
+        return package_exists, package_version
+    else:
+        return package_exists
+
+
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+
+USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
+
+FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper()
+
+# This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
+TORCH_FX_REQUIRED_VERSION = version.parse("1.10")
+
+
+_accelerate_available, _accelerate_version = _is_package_available("accelerate", return_version=True)
+_apex_available = _is_package_available("apex")
+_bitsandbytes_available = _is_package_available("bitsandbytes")
+# `importlib.metadata.version` doesn't work with `bs4` but `beautifulsoup4`. For `importlib.util.find_spec`, reversed.
+_bs4_available = importlib.util.find_spec("bs4") is not None
+_coloredlogs_available = _is_package_available("coloredlogs")
+# `importlib.metadata.version` doesn't work with `opencv-python-headless`.
+_cv2_available = importlib.util.find_spec("cv2") is not None
+_datasets_available = _is_package_available("datasets")
+_decord_available = importlib.util.find_spec("decord") is not None
+_detectron2_available = _is_package_available("detectron2")
+# We need to check both `faiss` and `faiss-cpu`.
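+# (`faiss` is the import name, but the PyPI distribution may be published as `faiss-cpu`, so the metadata lookup
+# below has to try both project names before declaring the package unavailable.)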
+_faiss_available = importlib.util.find_spec("faiss") is not None +try: + _faiss_version = importlib.metadata.version("faiss") + logger.debug(f"Successfully imported faiss version {_faiss_version}") +except importlib.metadata.PackageNotFoundError: + try: + _faiss_version = importlib.metadata.version("faiss-cpu") + logger.debug(f"Successfully imported faiss version {_faiss_version}") + except importlib.metadata.PackageNotFoundError: + _faiss_available = False +_ftfy_available = _is_package_available("ftfy") +_ipex_available, _ipex_version = _is_package_available("intel_extension_for_pytorch", return_version=True) +_jieba_available = _is_package_available("jieba") +_jinja_available = _is_package_available("jinja2") +_kenlm_available = _is_package_available("kenlm") +_keras_nlp_available = _is_package_available("keras_nlp") +_levenshtein_available = _is_package_available("Levenshtein") +_librosa_available = _is_package_available("librosa") +_natten_available = _is_package_available("natten") +_nltk_available = _is_package_available("nltk") +_onnx_available = _is_package_available("onnx") +_openai_available = _is_package_available("openai") +_optimum_available = _is_package_available("optimum") +_auto_gptq_available = _is_package_available("auto_gptq") +# `importlib.metadata.version` doesn't work with `awq` +_auto_awq_available = importlib.util.find_spec("awq") is not None +_pandas_available = _is_package_available("pandas") +_peft_available = _is_package_available("peft") +_phonemizer_available = _is_package_available("phonemizer") +_psutil_available = _is_package_available("psutil") +_py3nvml_available = _is_package_available("py3nvml") +_pyctcdecode_available = _is_package_available("pyctcdecode") +_pytesseract_available = _is_package_available("pytesseract") +_pytest_available = _is_package_available("pytest") +_pytorch_quantization_available = _is_package_available("pytorch_quantization") +_rjieba_available = _is_package_available("rjieba") +_sacremoses_available = _is_package_available("sacremoses") +_safetensors_available = _is_package_available("safetensors") +_scipy_available = _is_package_available("scipy") +_sentencepiece_available = _is_package_available("sentencepiece") +_is_seqio_available = _is_package_available("seqio") +_sklearn_available = importlib.util.find_spec("sklearn") is not None +if _sklearn_available: + try: + importlib.metadata.version("scikit-learn") + except importlib.metadata.PackageNotFoundError: + _sklearn_available = False +_smdistributed_available = importlib.util.find_spec("smdistributed") is not None +_soundfile_available = _is_package_available("soundfile") +_spacy_available = _is_package_available("spacy") +_sudachipy_available = _is_package_available("sudachipy") +_tensorflow_probability_available = _is_package_available("tensorflow_probability") +_tensorflow_text_available = _is_package_available("tensorflow_text") +_tf2onnx_available = _is_package_available("tf2onnx") +_timm_available = _is_package_available("timm") +_tokenizers_available = _is_package_available("tokenizers") +_torchaudio_available = _is_package_available("torchaudio") +_torchdistx_available = _is_package_available("torchdistx") +_torchvision_available = _is_package_available("torchvision") + + +_torch_version = "N/A" +_torch_available = False +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + _torch_available, _torch_version = _is_package_available("torch", return_version=True) +else: + logger.info("Disabling PyTorch because USE_TF is set") + 
_torch_available = False


+_tf_version = "N/A"
+_tf_available = False
+if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES:
+    _tf_available = True
+else:
+    if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+        # Note: _is_package_available("tensorflow") fails for tensorflow-cpu. Please test any changes to the line below
+        # with tensorflow-cpu to make sure it still works!
+        _tf_available = importlib.util.find_spec("tensorflow") is not None
+        if _tf_available:
+            candidates = (
+                "tensorflow",
+                "tensorflow-cpu",
+                "tensorflow-gpu",
+                "tf-nightly",
+                "tf-nightly-cpu",
+                "tf-nightly-gpu",
+                "tf-nightly-rocm",
+                "intel-tensorflow",
+                "intel-tensorflow-avx512",
+                "tensorflow-rocm",
+                "tensorflow-macos",
+                "tensorflow-aarch64",
+            )
+            _tf_version = None
+            # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+            for pkg in candidates:
+                try:
+                    _tf_version = importlib.metadata.version(pkg)
+                    break
+                except importlib.metadata.PackageNotFoundError:
+                    pass
+            _tf_available = _tf_version is not None
+        if _tf_available:
+            if version.parse(_tf_version) < version.parse("2"):
+                logger.info(
+                    f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum."
+                )
+                _tf_available = False
+    else:
+        logger.info("Disabling TensorFlow because USE_TORCH is set")
+
+
+_essentia_available = importlib.util.find_spec("essentia") is not None
+try:
+    _essentia_version = importlib.metadata.version("essentia")
+    logger.debug(f"Successfully imported essentia version {_essentia_version}")
+except importlib.metadata.PackageNotFoundError:
+    _essentia_available = False
+
+
+_pretty_midi_available = importlib.util.find_spec("pretty_midi") is not None
+try:
+    _pretty_midi_version = importlib.metadata.version("pretty_midi")
+    logger.debug(f"Successfully imported pretty_midi version {_pretty_midi_version}")
+except importlib.metadata.PackageNotFoundError:
+    _pretty_midi_available = False
+
+
+ccl_version = "N/A"
+_is_ccl_available = (
+    importlib.util.find_spec("torch_ccl") is not None
+    or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
+)
+try:
+    ccl_version = importlib.metadata.version("oneccl_bind_pt")
+    logger.debug(f"Detected oneccl_bind_pt version {ccl_version}")
+except importlib.metadata.PackageNotFoundError:
+    _is_ccl_available = False
+
+
+_flax_available = False
+if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+    _flax_available, _flax_version = _is_package_available("flax", return_version=True)
+    if _flax_available:
+        _jax_available, _jax_version = _is_package_available("jax", return_version=True)
+        if _jax_available:
+            logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
+        else:
+            _flax_available = _jax_available = False
+            _jax_version = _flax_version = "N/A"
+
+
+_torch_fx_available = False
+if _torch_available:
+    torch_version = version.parse(_torch_version)
+    _torch_fx_available = (torch_version.major, torch_version.minor) >= (
+        TORCH_FX_REQUIRED_VERSION.major,
+        TORCH_FX_REQUIRED_VERSION.minor,
+    )
+
+
+def is_kenlm_available():
+    return _kenlm_available
+
+
+def is_cv2_available():
+    return _cv2_available
+
+
+def is_torch_available():
+    return _torch_available
+
+
+def get_torch_version():
+    return _torch_version
+
+
+def is_torch_sdpa_available():
+    if not is_torch_available():
+        return False
+    elif _torch_version == "N/A":
+        return False
+
+    # NOTE: We require torch>=2.1 (and not torch>=2.0) to use SDPA in Transformers for two reasons:
+    # - Allow the global use of the `scale` argument
introduced in https://github.com/pytorch/pytorch/pull/95259 + # - Memory-efficient attention supports arbitrary attention_mask: https://github.com/pytorch/pytorch/pull/104310 + # NOTE: We require torch>=2.1.1 to avoid a numerical issue in SDPA with non-contiguous inputs: https://github.com/pytorch/pytorch/issues/112577 + return version.parse(_torch_version) >= version.parse("2.1.1") + + +def is_torchvision_available(): + return _torchvision_available + + +def is_pyctcdecode_available(): + return _pyctcdecode_available + + +def is_librosa_available(): + return _librosa_available + + +def is_essentia_available(): + return _essentia_available + + +def is_pretty_midi_available(): + return _pretty_midi_available + + +def is_torch_cuda_available(): + if is_torch_available(): + import torch + + return torch.cuda.is_available() + else: + return False + + +def is_torch_mps_available(): + if is_torch_available(): + import torch + + if hasattr(torch.backends, "mps"): + return torch.backends.mps.is_available() + return False + + +def is_torch_bf16_gpu_available(): + if not is_torch_available(): + return False + + import torch + + return torch.cuda.is_available() and torch.cuda.is_bf16_supported() + + +def is_torch_bf16_cpu_available(): + if not is_torch_available(): + return False + + import torch + + try: + # multiple levels of AttributeError depending on the pytorch version so do them all in one check + _ = torch.cpu.amp.autocast + except AttributeError: + return False + + return True + + +def is_torch_bf16_available(): + # the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util + # has become ambiguous and therefore deprecated + warnings.warn( + "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available " + "or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu", + FutureWarning, + ) + return is_torch_bf16_gpu_available() + + +@lru_cache() +def is_torch_fp16_available_on_device(device): + if not is_torch_available(): + return False + + import torch + + try: + x = torch.zeros(2, 2, dtype=torch.float16).to(device) + _ = x @ x + except: # noqa: E722 + # TODO: more precise exception matching, if possible. + # most backends should return `RuntimeError` however this is not guaranteed. + return False + + return True + + +@lru_cache() +def is_torch_bf16_available_on_device(device): + if not is_torch_available(): + return False + + import torch + + if device == "cuda": + return is_torch_bf16_gpu_available() + + try: + x = torch.zeros(2, 2, dtype=torch.bfloat16).to(device) + _ = x @ x + except: # noqa: E722 + # TODO: more precise exception matching, if possible. + # most backends should return `RuntimeError` however this is not guaranteed. 
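+        # (A failing 2x2 bfloat16 matmul on the target device is treated as "bf16 not supported".)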
+ return False + + return True + + +def is_torch_tf32_available(): + if not is_torch_available(): + return False + + import torch + + if not torch.cuda.is_available() or torch.version.cuda is None: + return False + if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: + return False + if int(torch.version.cuda.split(".")[0]) < 11: + return False + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): + return False + + return True + + +def is_torch_fx_available(): + return _torch_fx_available + + +def is_peft_available(): + return _peft_available + + +def is_bs4_available(): + return _bs4_available + + +def is_tf_available(): + return _tf_available + + +def is_coloredlogs_available(): + return _coloredlogs_available + + +def is_tf2onnx_available(): + return _tf2onnx_available + + +def is_onnx_available(): + return _onnx_available + + +def is_openai_available(): + return _openai_available + + +def is_flax_available(): + return _flax_available + + +def is_ftfy_available(): + return _ftfy_available + + +@lru_cache() +def is_torch_tpu_available(check_device=True): + "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" + if not _torch_available: + return False + if importlib.util.find_spec("torch_xla") is not None: + if check_device: + # We need to check if `xla_device` can be found, will raise a RuntimeError if not + try: + import torch_xla.core.xla_model as xm + + _ = xm.xla_device() + return True + except RuntimeError: + return False + return True + return False + + +@lru_cache() +def is_torch_neuroncore_available(check_device=True): + if importlib.util.find_spec("torch_neuronx") is not None: + return is_torch_tpu_available(check_device) + return False + + +@lru_cache() +def is_torch_npu_available(check_device=False): + "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" + if not _torch_available or importlib.util.find_spec("torch_npu") is None: + return False + + import torch + import torch_npu # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no NPU is found + _ = torch.npu.device_count() + return torch.npu.is_available() + except RuntimeError: + return False + return hasattr(torch, "npu") and torch.npu.is_available() + + +def is_torchdynamo_available(): + if not is_torch_available(): + return False + try: + import torch._dynamo as dynamo # noqa: F401 + + return True + except Exception: + return False + + +def is_torch_compile_available(): + if not is_torch_available(): + return False + + import torch + + # We don't do any version check here to support nighlies marked as 1.14. Ultimately needs to check version against + # 2.0 but let's do it later. 
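+    # `torch.compile` is a public API from PyTorch 2.0 onwards; the attribute check below also
+    # catches the 1.14 nightlies that were later rebranded as 2.0.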
+ return hasattr(torch, "compile") + + +def is_torchdynamo_compiling(): + if not is_torch_available(): + return False + try: + import torch._dynamo as dynamo # noqa: F401 + + return dynamo.is_compiling() + except Exception: + return False + + +def is_torch_tensorrt_fx_available(): + if importlib.util.find_spec("torch_tensorrt") is None: + return False + return importlib.util.find_spec("torch_tensorrt.fx") is not None + + +def is_datasets_available(): + return _datasets_available + + +def is_detectron2_available(): + return _detectron2_available + + +def is_rjieba_available(): + return _rjieba_available + + +def is_psutil_available(): + return _psutil_available + + +def is_py3nvml_available(): + return _py3nvml_available + + +def is_sacremoses_available(): + return _sacremoses_available + + +def is_apex_available(): + return _apex_available + + +def is_ninja_available(): + r""" + Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the + [ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise. + """ + try: + subprocess.check_output("ninja --version".split()) + except Exception: + return False + else: + return True + + +def is_ipex_available(): + def get_major_and_minor_from_version(full_version): + return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor) + + if not is_torch_available() or not _ipex_available: + return False + + torch_major_and_minor = get_major_and_minor_from_version(_torch_version) + ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) + if torch_major_and_minor != ipex_major_and_minor: + logger.warning( + f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," + f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again." 
+ ) + return False + return True + + +@lru_cache +def is_torch_xpu_available(check_device=False): + "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" + if not is_ipex_available(): + return False + + import intel_extension_for_pytorch # noqa: F401 + import torch + + if check_device: + try: + # Will raise a RuntimeError if no XPU is found + _ = torch.xpu.device_count() + return torch.xpu.is_available() + except RuntimeError: + return False + return hasattr(torch, "xpu") and torch.xpu.is_available() + + +def is_bitsandbytes_available(): + if not is_torch_available(): + return False + + # bitsandbytes throws an error if cuda is not available + # let's avoid that by adding a simple check + import torch + + return _bitsandbytes_available and torch.cuda.is_available() + + +def is_flash_attn_2_available(): + if not is_torch_available(): + return False + + if not _is_package_available("flash_attn"): + return False + + # Let's add an extra check to see if cuda is available + import torch + + if not torch.cuda.is_available(): + return False + + if torch.version.cuda: + return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0") + elif torch.version.hip: + # TODO: Bump the requirement to 2.1.0 once released in https://github.com/ROCmSoftwarePlatform/flash-attention + return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.0.4") + else: + return False + + +def is_flash_attn_greater_or_equal_2_10(): + if not _is_package_available("flash_attn"): + return False + + return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0") + + +def is_flash_attn_available(): + logger.warning( + "Using `is_flash_attn_available` is deprecated and will be removed in v4.38. " + "Please use `is_flash_attn_2_available` instead." 
+ ) + return is_flash_attn_2_available() + + +def is_torchdistx_available(): + return _torchdistx_available + + +def is_faiss_available(): + return _faiss_available + + +def is_scipy_available(): + return _scipy_available + + +def is_sklearn_available(): + return _sklearn_available + + +def is_sentencepiece_available(): + return _sentencepiece_available + + +def is_seqio_available(): + return _is_seqio_available + + +def is_protobuf_available(): + if importlib.util.find_spec("google") is None: + return False + return importlib.util.find_spec("google.protobuf") is not None + + +def is_accelerate_available(min_version: str = "0.21.0"): + if min_version is not None: + return _accelerate_available and version.parse(_accelerate_version) >= version.parse(min_version) + return _accelerate_available + + +def is_fsdp_available(min_version: str = "1.12.0"): + return is_torch_available() and version.parse(_torch_version) >= version.parse(min_version) + + +def is_optimum_available(): + return _optimum_available + + +def is_auto_awq_available(): + return _auto_awq_available + + +def is_auto_gptq_available(): + return _auto_gptq_available + + +def is_levenshtein_available(): + return _levenshtein_available + + +def is_optimum_neuron_available(): + return _optimum_available and _is_package_available("optimum.neuron") + + +def is_safetensors_available(): + return _safetensors_available + + +def is_tokenizers_available(): + return _tokenizers_available + + +def is_vision_available(): + _pil_available = importlib.util.find_spec("PIL") is not None + if _pil_available: + try: + package_version = importlib.metadata.version("Pillow") + except importlib.metadata.PackageNotFoundError: + try: + package_version = importlib.metadata.version("Pillow-SIMD") + except importlib.metadata.PackageNotFoundError: + return False + logger.debug(f"Detected PIL version {package_version}") + return _pil_available + + +def is_pytesseract_available(): + return _pytesseract_available + + +def is_pytest_available(): + return _pytest_available + + +def is_spacy_available(): + return _spacy_available + + +def is_tensorflow_text_available(): + return is_tf_available() and _tensorflow_text_available + + +def is_keras_nlp_available(): + return is_tensorflow_text_available() and _keras_nlp_available + + +def is_in_notebook(): + try: + # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py + get_ipython = sys.modules["IPython"].get_ipython + if "IPKernelApp" not in get_ipython().config: + raise ImportError("console") + if "VSCODE_PID" in os.environ: + raise ImportError("vscode") + if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0": + # Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook + # https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel + raise ImportError("databricks") + + return importlib.util.find_spec("IPython") is not None + except (AttributeError, ImportError, KeyError): + return False + + +def is_pytorch_quantization_available(): + return _pytorch_quantization_available + + +def is_tensorflow_probability_available(): + return _tensorflow_probability_available + + +def is_pandas_available(): + return _pandas_available + + +def is_sagemaker_dp_enabled(): + # Get the sagemaker specific env variable. + sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") + try: + # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". 
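+        # Assumed shape, e.g.: SM_FRAMEWORK_PARAMS='{"sagemaker_distributed_dataparallel_enabled": true}'
+        # (only this one field is inspected here)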
+ sagemaker_params = json.loads(sagemaker_params) + if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): + return False + except json.JSONDecodeError: + return False + # Lastly, check if the `smdistributed` module is present. + return _smdistributed_available + + +def is_sagemaker_mp_enabled(): + # Get the sagemaker specific mp parameters from smp_options variable. + smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") + try: + # Parse it and check the field "partitions" is included, it is required for model parallel. + smp_options = json.loads(smp_options) + if "partitions" not in smp_options: + return False + except json.JSONDecodeError: + return False + + # Get the sagemaker specific framework parameters from mpi_options variable. + mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") + try: + # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". + mpi_options = json.loads(mpi_options) + if not mpi_options.get("sagemaker_mpi_enabled", False): + return False + except json.JSONDecodeError: + return False + # Lastly, check if the `smdistributed` module is present. + return _smdistributed_available + + +def is_training_run_on_sagemaker(): + return "SAGEMAKER_JOB_NAME" in os.environ + + +def is_soundfile_availble(): + return _soundfile_available + + +def is_timm_available(): + return _timm_available + + +def is_natten_available(): + return _natten_available + + +def is_nltk_available(): + return _nltk_available + + +def is_torchaudio_available(): + return _torchaudio_available + + +def is_speech_available(): + # For now this depends on torchaudio but the exact dependency might evolve in the future. + return _torchaudio_available + + +def is_phonemizer_available(): + return _phonemizer_available + + +def torch_only_method(fn): + def wrapper(*args, **kwargs): + if not _torch_available: + raise ImportError( + "You need to install pytorch to use this method or class, " + "or activate it with environment variables USE_TORCH=1 and USE_TF=0." + ) + else: + return fn(*args, **kwargs) + + return wrapper + + +def is_ccl_available(): + return _is_ccl_available + + +def is_decord_available(): + return _decord_available + + +def is_sudachi_available(): + return _sudachipy_available + + +def is_jumanpp_available(): + return (importlib.util.find_spec("rhoknp") is not None) and (shutil.which("jumanpp") is not None) + + +def is_cython_available(): + return importlib.util.find_spec("pyximport") is not None + + +def is_jieba_available(): + return _jieba_available + + +def is_jinja_available(): + return _jinja_available + + +# docstyle-ignore +CV2_IMPORT_ERROR = """ +{0} requires the OpenCV library but it was not found in your environment. You can install it with: +``` +pip install opencv-python +``` +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +DATASETS_IMPORT_ERROR = """ +{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: +``` +pip install datasets +``` +In a notebook or a colab, you can install it by executing a cell with +``` +!pip install datasets +``` +then restarting your kernel. + +Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current +working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or +that python file if that's the case. Please note that you may need to restart your runtime after installation. 
+"""
+
+
+# docstyle-ignore
+TOKENIZERS_IMPORT_ERROR = """
+{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
+```
+pip install tokenizers
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install tokenizers
+```
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SENTENCEPIECE_IMPORT_ERROR = """
+{0} requires the SentencePiece library but it was not found in your environment. Check out the instructions on the
+installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PROTOBUF_IMPORT_ERROR = """
+{0} requires the protobuf library but it was not found in your environment. Check out the instructions on the
+installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+FAISS_IMPORT_ERROR = """
+{0} requires the faiss library but it was not found in your environment. Check out the instructions on the
+installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PYTORCH_IMPORT_ERROR = """
+{0} requires the PyTorch library but it was not found in your environment. Check out the instructions on the
+installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+TORCHVISION_IMPORT_ERROR = """
+{0} requires the Torchvision library but it was not found in your environment. Check out the instructions on the
+installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PYTORCH_IMPORT_ERROR_WITH_TF = """
+{0} requires the PyTorch library but it was not found in your environment.
+However, we were able to find a TensorFlow installation. TensorFlow classes begin
+with "TF", but are otherwise identically named to our PyTorch classes. This
+means that the TF equivalent of the class you tried to import would be "TF{0}".
+If you want to use TensorFlow, please use TF classes instead!
+
+If you really do want to use PyTorch please go to
+https://pytorch.org/get-started/locally/ and follow the instructions that
+match your environment.
+"""
+
+# docstyle-ignore
+TF_IMPORT_ERROR_WITH_PYTORCH = """
+{0} requires the TensorFlow library but it was not found in your environment.
+However, we were able to find a PyTorch installation. PyTorch classes do not begin
+with "TF", but are otherwise identically named to our TF classes.
+If you want to use PyTorch, please use those classes instead!
+
+If you really do want to use TensorFlow, please follow the instructions on the
+installation page https://www.tensorflow.org/install that match your environment.
+"""
+
+# docstyle-ignore
+BS4_IMPORT_ERROR = """
+{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
+`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SKLEARN_IMPORT_ERROR = """
+{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
+```
+pip install -U scikit-learn
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install -U scikit-learn
+```
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+TENSORFLOW_IMPORT_ERROR = """
+{0} requires the TensorFlow library but it was not found in your environment. Check out the instructions on the
+installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+DETECTRON2_IMPORT_ERROR = """
+{0} requires the detectron2 library but it was not found in your environment. Check out the instructions on the
+installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+FLAX_IMPORT_ERROR = """
+{0} requires the FLAX library but it was not found in your environment. Check out the instructions on the
+installation page: https://github.com/google/flax and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+FTFY_IMPORT_ERROR = """
+{0} requires the ftfy library but it was not found in your environment. Check out the instructions on the
+installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+LEVENSHTEIN_IMPORT_ERROR = """
+{0} requires the python-Levenshtein library but it was not found in your environment. You can install it with pip: `pip
+install python-Levenshtein`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PYTORCH_QUANTIZATION_IMPORT_ERROR = """
+{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:
+`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+TENSORFLOW_PROBABILITY_IMPORT_ERROR = """
+{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as
+explained here: https://github.com/tensorflow/probability. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+TENSORFLOW_TEXT_IMPORT_ERROR = """
+{0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as
+explained here: https://www.tensorflow.org/text/guide/tf_text_intro.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PANDAS_IMPORT_ERROR = """
+{0} requires the pandas library but it was not found in your environment. You can install it with pip as
+explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PHONEMIZER_IMPORT_ERROR = """
+{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:
+`pip install phonemizer`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SACREMOSES_IMPORT_ERROR = """
+{0} requires the sacremoses library but it was not found in your environment. You can install it with pip:
+`pip install sacremoses`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SCIPY_IMPORT_ERROR = """
+{0} requires the scipy library but it was not found in your environment. You can install it with pip:
+`pip install scipy`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SPEECH_IMPORT_ERROR = """
+{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:
+`pip install torchaudio`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+TIMM_IMPORT_ERROR = """
+{0} requires the timm library but it was not found in your environment. You can install it with pip:
+`pip install timm`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+NATTEN_IMPORT_ERROR = """
+{0} requires the natten library but it was not found in your environment. You can install it by referring to:
+shi-labs.com/natten. You can also install it with pip (may take longer to build):
+`pip install natten`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+NLTK_IMPORT_ERROR = """
+{0} requires the NLTK library but it was not found in your environment. You can install it by referring to:
+https://www.nltk.org/install.html. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+VISION_IMPORT_ERROR = """
+{0} requires the PIL library but it was not found in your environment. You can install it with pip:
+`pip install pillow`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PYTESSERACT_IMPORT_ERROR = """
+{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:
+`pip install pytesseract`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PYCTCDECODE_IMPORT_ERROR = """
+{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:
+`pip install pyctcdecode`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+ACCELERATE_IMPORT_ERROR = """
+{0} requires the accelerate library but it was not found in your environment. You can install it with pip:
+`pip install accelerate`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+CCL_IMPORT_ERROR = """
+{0} requires the torch ccl library but it was not found in your environment. You can install it with pip:
+`pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+ESSENTIA_IMPORT_ERROR = """
+{0} requires the essentia library, but it was not found in your environment. You can install it with pip:
+`pip install essentia==2.1b6.dev1034`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+LIBROSA_IMPORT_ERROR = """
+{0} requires the librosa library, but it was not found in your environment. You can install it with pip:
+`pip install librosa`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PRETTY_MIDI_IMPORT_ERROR = """
+{0} requires the pretty_midi library, but it was not found in your environment. You can install it with pip:
+`pip install pretty_midi`
+Please note that you may need to restart your runtime after installation.
+"""
+
+DECORD_IMPORT_ERROR = """
+{0} requires the decord library but it was not found in your environment. You can install it with pip: `pip install
+decord`. Please note that you may need to restart your runtime after installation.
+"""
+
+CYTHON_IMPORT_ERROR = """
+{0} requires the Cython library but it was not found in your environment. You can install it with pip: `pip install
+Cython`. Please note that you may need to restart your runtime after installation.
+"""
+
+JIEBA_IMPORT_ERROR = """
+{0} requires the jieba library but it was not found in your environment. You can install it with pip: `pip install
+jieba`. Please note that you may need to restart your runtime after installation.
+"""
+
+PEFT_IMPORT_ERROR = """
+{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install
+peft`. Please note that you may need to restart your runtime after installation.
+"""
+
+JINJA_IMPORT_ERROR = """
+{0} requires the jinja library but it was not found in your environment. You can install it with pip: `pip install
+jinja2`. Please note that you may need to restart your runtime after installation.
+""" + +BACKENDS_MAPPING = OrderedDict( + [ + ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), + ("cv2", (is_cv2_available, CV2_IMPORT_ERROR)), + ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), + ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), + ("essentia", (is_essentia_available, ESSENTIA_IMPORT_ERROR)), + ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), + ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), + ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), + ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), + ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), + ("pretty_midi", (is_pretty_midi_available, PRETTY_MIDI_IMPORT_ERROR)), + ("levenshtein", (is_levenshtein_available, LEVENSHTEIN_IMPORT_ERROR)), + ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), + ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), + ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), + ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), + ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)), + ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), + ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), + ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), + ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), + ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), + ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)), + ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), + ("natten", (is_natten_available, NATTEN_IMPORT_ERROR)), + ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), + ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), + ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), + ("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)), + ("vision", (is_vision_available, VISION_IMPORT_ERROR)), + ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), + ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), + ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)), + ("decord", (is_decord_available, DECORD_IMPORT_ERROR)), + ("cython", (is_cython_available, CYTHON_IMPORT_ERROR)), + ("jieba", (is_jieba_available, JIEBA_IMPORT_ERROR)), + ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), + ("jinja", (is_jinja_available, JINJA_IMPORT_ERROR)), + ] +) + + +def requires_backends(obj, backends): + if not isinstance(backends, (list, tuple)): + backends = [backends] + + name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ + + # Raise an error for users who might not realize that classes without "TF" are torch-only + if "torch" in backends and "tf" not in backends and not is_torch_available() and is_tf_available(): + raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name)) + + # Raise the inverse error for PyTorch users trying to load TF classes + if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available(): + raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name)) + + checks = (BACKENDS_MAPPING[backend] for backend in backends) + failed = [msg.format(name) for available, msg in checks if not available()] + if failed: + raise ImportError("".join(failed)) + + +class DummyObject(type): + """ + Metaclass for the dummy objects. 
Any class inheriting from it will return the ImportError generated by + `requires_backend` each time a user tries to access any method of that class. + """ + + def __getattribute__(cls, key): + if key.startswith("_") and key != "_from_config": + return super().__getattribute__(key) + requires_backends(cls, cls._backends) + + +def torch_required(func): + warnings.warn( + "The method `torch_required` is deprecated and will be removed in v4.36. Use `requires_backends` instead.", + FutureWarning, + ) + + # Chose a different decorator name than in tests so it's clear they are not the same. + @wraps(func) + def wrapper(*args, **kwargs): + if is_torch_available(): + return func(*args, **kwargs) + else: + raise ImportError(f"Method `{func.__name__}` requires PyTorch.") + + return wrapper + + +def tf_required(func): + warnings.warn( + "The method `tf_required` is deprecated and will be removed in v4.36. Use `requires_backends` instead.", + FutureWarning, + ) + + # Chose a different decorator name than in tests so it's clear they are not the same. + @wraps(func) + def wrapper(*args, **kwargs): + if is_tf_available(): + return func(*args, **kwargs) + else: + raise ImportError(f"Method `{func.__name__}` requires TF.") + + return wrapper + + +def is_torch_fx_proxy(x): + if is_torch_fx_available(): + import torch.fx + + return isinstance(x, torch.fx.Proxy) + return False + + +class _LazyModule(ModuleType): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested. + """ + + # Very heavily inspired by optuna.integration._IntegrationModule + # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py + def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): + super().__init__(name) + self._modules = set(import_structure.keys()) + self._class_to_module = {} + for key, values in import_structure.items(): + for value in values: + self._class_to_module[value] = key + # Needed for autocompletion in an IDE + self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = import_structure + + # Needed for autocompletion in an IDE + def __dir__(self): + result = super().__dir__() + # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether + # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. + for attr in self.__all__: + if attr not in result: + result.append(attr) + return result + + def __getattr__(self, name: str) -> Any: + if name in self._objects: + return self._objects[name] + if name in self._modules: + value = self._get_module(name) + elif name in self._class_to_module.keys(): + module = self._get_module(self._class_to_module[name]) + value = getattr(module, name) + else: + raise AttributeError(f"module {self.__name__} has no attribute {name}") + + setattr(self, name, value) + return value + + def _get_module(self, module_name: str): + try: + return importlib.import_module("." 
+ module_name, self.__name__) + except Exception as e: + raise RuntimeError( + f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" + f" traceback):\n{e}" + ) from e + + def __reduce__(self): + return (self.__class__, (self._name, self.__file__, self._import_structure)) + + +class OptionalDependencyNotAvailable(BaseException): + """Internally used error class for signalling an optional dependency was not found.""" + + +def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: + """Imports transformers directly + + Args: + path (`str`): The path to the source file + file (`str`, optional): The file to join with the path. Defaults to "__init__.py". + + Returns: + `ModuleType`: The resulting imported module + """ + name = "transformers" + location = os.path.join(path, file) + spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path]) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + module = sys.modules[name] + return module diff --git a/modified/utils/logging.py b/modified/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..276fa6e8f85564fe7e61cfbd6357d39fc393289a --- /dev/null +++ b/modified/utils/logging.py @@ -0,0 +1,396 @@ +# coding=utf-8 +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Logging utilities.""" + + +import functools +import logging +import os +import sys +import threading +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from logging import captureWarnings as _captureWarnings +from typing import Optional + +import huggingface_hub.utils as hf_hub_utils +from tqdm import auto as tqdm_lib + + +_lock = threading.Lock() +_default_handler: Optional[logging.Handler] = None + +log_levels = { + "detail": logging.DEBUG, # will also print filename and line number + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + +_tqdm_active = True + + +def _get_default_logging_level(): + """ + If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. 
If it is + not - fall back to `_default_log_level` + """ + env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + # This library has already configured the library root logger. + return + _default_handler = logging.StreamHandler() # Set sys.stderr as stream. + # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176 + if sys.stderr is None: + sys.stderr = open(os.devnull, "w") + + _default_handler.flush = sys.stderr.flush + + # Apply our default configuration to the library root logger. + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(_default_handler) + library_root_logger.setLevel(_get_default_logging_level()) + # if logging level is debug, we add pathname and lineno to formatter for easy debugging + if os.getenv("TRANSFORMERS_VERBOSITY", None) == "detail": + formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s") + _default_handler.setFormatter(formatter) + + library_root_logger.propagate = False + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + + library_root_logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_log_levels_dict(): + return log_levels + + +def captureWarnings(capture): + """ + Calls the `captureWarnings` method from the logging library to enable management of the warnings emitted by the + `warnings` library. + + Read more about this method here: + https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module + + All warnings will be logged through the `py.warnings` logger. + + Careful: this method also adds a handler to this logger if it does not already have one, and updates the logging + level of that logger to the library's root logger. + """ + logger = get_logger("py.warnings") + + if not logger.handlers: + logger.addHandler(_default_handler) + + logger.setLevel(_get_library_root_logger().level) + + _captureWarnings(capture) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Return a logger with the specified name. + + This function is not supposed to be directly accessed unless you are writing a custom transformers module. + """ + + if name is None: + name = _get_library_name() + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """ + Return the current level for the 🤗 Transformers's root logger as an int. + + Returns: + `int`: The logging level. 
+ + + + 🤗 Transformers has following logging levels: + + - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL` + - 40: `transformers.logging.ERROR` + - 30: `transformers.logging.WARNING` or `transformers.logging.WARN` + - 20: `transformers.logging.INFO` + - 10: `transformers.logging.DEBUG` + + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Set the verbosity level for the 🤗 Transformers's root logger. + + Args: + verbosity (`int`): + Logging level, e.g., one of: + + - `transformers.logging.CRITICAL` or `transformers.logging.FATAL` + - `transformers.logging.ERROR` + - `transformers.logging.WARNING` or `transformers.logging.WARN` + - `transformers.logging.INFO` + - `transformers.logging.DEBUG` + """ + + _configure_library_root_logger() + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """Set the verbosity to the `INFO` level.""" + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """Set the verbosity to the `WARNING` level.""" + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """Set the verbosity to the `DEBUG` level.""" + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """Set the verbosity to the `ERROR` level.""" + return set_verbosity(ERROR) + + +def disable_default_handler() -> None: + """Disable the default handler of the HuggingFace Transformers's root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().removeHandler(_default_handler) + + +def enable_default_handler() -> None: + """Enable the default handler of the HuggingFace Transformers's root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().addHandler(_default_handler) + + +def add_handler(handler: logging.Handler) -> None: + """adds a handler to the HuggingFace Transformers's root logger.""" + + _configure_library_root_logger() + + assert handler is not None + _get_library_root_logger().addHandler(handler) + + +def remove_handler(handler: logging.Handler) -> None: + """removes given handler from the HuggingFace Transformers's root logger.""" + + _configure_library_root_logger() + + assert handler is not None and handler not in _get_library_root_logger().handlers + _get_library_root_logger().removeHandler(handler) + + +def disable_propagation() -> None: + """ + Disable propagation of the library log outputs. Note that log propagation is disabled by default. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """ + Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to + prevent double logging if the root logger has been configured. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = True + + +def enable_explicit_format() -> None: + """ + Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows: + ``` + [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE + ``` + All handlers currently bound to the root logger are affected by this method. 
+ """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") + handler.setFormatter(formatter) + + +def reset_format() -> None: + """ + Resets the formatting for HuggingFace Transformers's loggers. + + All handlers currently bound to the root logger are affected by this method. + """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + handler.setFormatter(None) + + +def warning_advice(self, *args, **kwargs): + """ + This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this + warning will not be printed + """ + no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False) + if no_advisory_warnings: + return + self.warning(*args, **kwargs) + + +logging.Logger.warning_advice = warning_advice + + +@functools.lru_cache(None) +def warning_once(self, *args, **kwargs): + """ + This method is identical to `logger.warning()`, but will emit the warning with the same message only once + + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. + The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to + another type of cache that includes the caller frame information in the hashing function. + """ + self.warning(*args, **kwargs) + + +logging.Logger.warning_once = warning_once + + +class EmptyTqdm: + """Dummy tqdm which doesn't do anything.""" + + def __init__(self, *args, **kwargs): # pylint: disable=unused-argument + self._iterator = args[0] if args else None + + def __iter__(self): + return iter(self._iterator) + + def __getattr__(self, _): + """Return empty function.""" + + def empty_fn(*args, **kwargs): # pylint: disable=unused-argument + return + + return empty_fn + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + return + + +class _tqdm_cls: + def __call__(self, *args, **kwargs): + if _tqdm_active: + return tqdm_lib.tqdm(*args, **kwargs) + else: + return EmptyTqdm(*args, **kwargs) + + def set_lock(self, *args, **kwargs): + self._lock = None + if _tqdm_active: + return tqdm_lib.tqdm.set_lock(*args, **kwargs) + + def get_lock(self): + if _tqdm_active: + return tqdm_lib.tqdm.get_lock() + + +tqdm = _tqdm_cls() + + +def is_progress_bar_enabled() -> bool: + """Return a boolean indicating whether tqdm progress bars are enabled.""" + global _tqdm_active + return bool(_tqdm_active) + + +def enable_progress_bar(): + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = True + hf_hub_utils.enable_progress_bars() + + +def disable_progress_bar(): + """Disable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = False + hf_hub_utils.disable_progress_bars() diff --git a/modified/utils/model_parallel_utils.py b/modified/utils/model_parallel_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7ec79a5e23cbc976e2cb26934abe94e7bbe890d7 --- /dev/null +++ b/modified/utils/model_parallel_utils.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from math import ceil + + +def assert_device_map(device_map, num_blocks): + blocks = list(range(0, num_blocks)) + + device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist] + + # Duplicate check + duplicate_blocks = [] + for i in device_map_blocks: + if device_map_blocks.count(i) > 1 and i not in duplicate_blocks: + duplicate_blocks.append(i) + # Missing blocks + missing_blocks = [i for i in blocks if i not in device_map_blocks] + extra_blocks = [i for i in device_map_blocks if i not in blocks] + + if len(duplicate_blocks) != 0: + raise ValueError( + "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." + " These attention blocks were specified more than once: " + str(duplicate_blocks) + ) + if len(missing_blocks) != 0: + raise ValueError( + "There are attention blocks for this model that are not specified in the device_map. Add these attention " + "blocks to a device on the device_map: " + str(missing_blocks) + ) + if len(extra_blocks) != 0: + raise ValueError( + "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + + str(extra_blocks) + ) + + +def get_device_map(n_layers, devices): + """Returns a dictionary of layers distributed evenly across all devices.""" + layers = list(range(n_layers)) + n_blocks = int(ceil(n_layers / len(devices))) + layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)] + + return dict(zip(devices, layers_list)) diff --git a/modified/utils/notebook.py b/modified/utils/notebook.py new file mode 100644 index 0000000000000000000000000000000000000000..f7396642732e585e450f5b345e9dcf74eb294b78 --- /dev/null +++ b/modified/utils/notebook.py @@ -0,0 +1,378 @@ +# coding=utf-8 +# Copyright 2020 Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import time +from typing import Optional + +import IPython.display as disp + +from ..trainer_callback import TrainerCallback +from ..trainer_utils import IntervalStrategy, has_length + + +def format_time(t): + "Format `t` (in seconds) to (h):mm:ss" + t = int(t) + h, m, s = t // 3600, (t // 60) % 60, t % 60 + return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}" + + +def html_progress_bar(value, total, prefix, label, width=300): + # docstyle-ignore + return f""" +
+    <div>
+      {prefix}
+      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
+      {label}
+    </div>
+    """
+
+
+def text_to_html_table(items):
+    "Put the texts in `items` in an HTML table."
+    html_code = """<table border="1" class="dataframe">\n"""
+    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
+    for i in items[0]:
+        html_code += f"      <th>{i}</th>\n"
+    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
+    for line in items[1:]:
+        html_code += "    <tr>\n"
+        for elt in line:
+            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
+            html_code += f"      <td>{elt}</td>\n"
+        html_code += "    </tr>\n"
+    html_code += "  </tbody>\n</table><p>"
+    return html_code
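As a quick orientation, here is a hedged sketch of how these two helpers combine: `text_to_html_table` turns a header row plus metric rows into an HTML table, and `html_progress_bar` wraps a value/total pair in a `<progress>` element. The sample values are invented for illustration:

```python
# Illustrative values only; in a notebook you would wrap the result in IPython.display.HTML.
rows = [["Step", "Training Loss"], [500, 1.2345678], [1000, 0.9876543]]
table_html = text_to_html_table(rows)  # floats are rendered with 6 decimal places
bar_html = html_progress_bar(value=1000, total=2000, prefix="", label="[1000/2000]")
print(bar_html + table_html)
```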
" + return html_code + + +class NotebookProgressBar: + """ + A progress par for display in a notebook. + + Class attributes (overridden by derived classes) + + - **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`. + - **update_every** (`float`) -- Since calling the time takes some time, we only do it every presumed + `update_every` seconds. The progress bar uses the average time passed up until now to guess the next value + for which it will call the update. + + Args: + total (`int`): + The total number of iterations to reach. + prefix (`str`, *optional*): + A prefix to add before the progress bar. + leave (`bool`, *optional*, defaults to `True`): + Whether or not to leave the progress bar once it's completed. You can always call the + [`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear. + parent ([`~notebook.NotebookTrainingTracker`], *optional*): + A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handle + their display. If set, the object passed must have a `display()` method. + width (`int`, *optional*, defaults to 300): + The width (in pixels) that the bar will take. + + Example: + + ```python + import time + + pbar = NotebookProgressBar(100) + for val in range(100): + pbar.update(val) + time.sleep(0.07) + pbar.update(100) + ```""" + + warmup = 5 + update_every = 0.2 + + def __init__( + self, + total: int, + prefix: Optional[str] = None, + leave: bool = True, + parent: Optional["NotebookTrainingTracker"] = None, + width: int = 300, + ): + self.total = total + self.prefix = "" if prefix is None else prefix + self.leave = leave + self.parent = parent + self.width = width + self.last_value = None + self.comment = None + self.output = None + + def update(self, value: int, force_update: bool = False, comment: str = None): + """ + The main method to update the progress bar to `value`. + + Args: + value (`int`): + The value to use. Must be between 0 and `total`. + force_update (`bool`, *optional*, defaults to `False`): + Whether or not to force and update of the internal state and display (by default, the bar will wait for + `value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute + since the last update to avoid adding boilerplate). + comment (`str`, *optional*): + A comment to add on the left of the progress bar. + """ + self.value = value + if comment is not None: + self.comment = comment + if self.last_value is None: + self.start_time = self.last_time = time.time() + self.start_value = self.last_value = value + self.elapsed_time = self.predicted_remaining = None + self.first_calls = self.warmup + self.wait_for = 1 + self.update_bar(value) + elif value <= self.last_value and not force_update: + return + elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total): + if self.first_calls > 0: + self.first_calls -= 1 + current_time = time.time() + self.elapsed_time = current_time - self.start_time + # We could have value = self.start_value if the update is called twixe with the same start value. 
+ if value > self.start_value: + self.average_time_per_item = self.elapsed_time / (value - self.start_value) + else: + self.average_time_per_item = None + if value >= self.total: + value = self.total + self.predicted_remaining = None + if not self.leave: + self.close() + elif self.average_time_per_item is not None: + self.predicted_remaining = self.average_time_per_item * (self.total - value) + self.update_bar(value) + self.last_value = value + self.last_time = current_time + if (self.average_time_per_item is None) or (self.average_time_per_item == 0): + self.wait_for = 1 + else: + self.wait_for = max(int(self.update_every / self.average_time_per_item), 1) + + def update_bar(self, value, comment=None): + spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value) + if self.elapsed_time is None: + self.label = f"[{spaced_value}/{self.total} : < :" + elif self.predicted_remaining is None: + self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}" + else: + self.label = ( + f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <" + f" {format_time(self.predicted_remaining)}" + ) + if self.average_time_per_item == 0: + self.label += ", +inf it/s" + else: + self.label += f", {1/self.average_time_per_item:.2f} it/s" + + self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]" + self.display() + + def display(self): + self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width) + if self.parent is not None: + # If this is a child bar, the parent will take care of the display. + self.parent.display() + return + if self.output is None: + self.output = disp.display(disp.HTML(self.html_code), display_id=True) + else: + self.output.update(disp.HTML(self.html_code)) + + def close(self): + "Closes the progress bar." + if self.parent is None and self.output is not None: + self.output.update(disp.HTML("")) + + +class NotebookTrainingTracker(NotebookProgressBar): + """ + An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics. + + Args: + num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*): + The list of column names for the metrics table (will be inferred from the first call to + [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set). + """ + + def __init__(self, num_steps, column_names=None): + super().__init__(num_steps) + self.inner_table = None if column_names is None else [column_names] + self.child_bar = None + + def display(self): + self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width) + if self.inner_table is not None: + self.html_code += text_to_html_table(self.inner_table) + if self.child_bar is not None: + self.html_code += self.child_bar.html_code + if self.output is None: + self.output = disp.display(disp.HTML(self.html_code), display_id=True) + else: + self.output.update(disp.HTML(self.html_code)) + + def write_line(self, values): + """ + Write the values in the inner table. + + Args: + values (`Dict[str, float]`): The values to display. 
+ """ + if self.inner_table is None: + self.inner_table = [list(values.keys()), list(values.values())] + else: + columns = self.inner_table[0] + for key in values.keys(): + if key not in columns: + columns.append(key) + self.inner_table[0] = columns + if len(self.inner_table) > 1: + last_values = self.inner_table[-1] + first_column = self.inner_table[0][0] + if last_values[0] != values[first_column]: + # write new line + self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) + else: + # update last line + new_values = values + for c in columns: + if c not in new_values.keys(): + new_values[c] = last_values[columns.index(c)] + self.inner_table[-1] = [new_values[c] for c in columns] + else: + self.inner_table.append([values[c] for c in columns]) + + def add_child(self, total, prefix=None, width=300): + """ + Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be + easily updated). + + Args: + total (`int`): The number of iterations for the child progress bar. + prefix (`str`, *optional*): A prefix to write on the left of the progress bar. + width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar. + """ + self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width) + return self.child_bar + + def remove_child(self): + """ + Closes the child progress bar. + """ + self.child_bar = None + self.display() + + +class NotebookProgressCallback(TrainerCallback): + """ + A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks or + Google colab. + """ + + def __init__(self): + self.training_tracker = None + self.prediction_bar = None + self._force_next_update = False + + def on_train_begin(self, args, state, control, **kwargs): + self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" + self.training_loss = 0 + self.last_log = 0 + column_names = [self.first_column] + ["Training Loss"] + if args.evaluation_strategy != IntervalStrategy.NO: + column_names.append("Validation Loss") + self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) + + def on_step_end(self, args, state, control, **kwargs): + epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}" + self.training_tracker.update( + state.global_step + 1, + comment=f"Epoch {epoch}/{state.num_train_epochs}", + force_update=self._force_next_update, + ) + self._force_next_update = False + + def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): + if not has_length(eval_dataloader): + return + if self.prediction_bar is None: + if self.training_tracker is not None: + self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader)) + else: + self.prediction_bar = NotebookProgressBar(len(eval_dataloader)) + self.prediction_bar.update(1) + else: + self.prediction_bar.update(self.prediction_bar.value + 1) + + def on_predict(self, args, state, control, **kwargs): + if self.prediction_bar is not None: + self.prediction_bar.close() + self.prediction_bar = None + + def on_log(self, args, state, control, logs=None, **kwargs): + # Only for when there is no evaluation + if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: + values = {"Training Loss": logs["loss"]} + # First column is necessarily Step sine we're not in epoch eval strategy + values["Step"] = state.global_step + self.training_tracker.write_line(values) + + def 
+        if self.training_tracker is not None:
+            values = {"Training Loss": "No log", "Validation Loss": "No log"}
+            for log in reversed(state.log_history):
+                if "loss" in log:
+                    values["Training Loss"] = log["loss"]
+                    break
+
+            if self.first_column == "Epoch":
+                values["Epoch"] = int(state.epoch)
+            else:
+                values["Step"] = state.global_step
+            metric_key_prefix = "eval"
+            for k in metrics:
+                if k.endswith("_loss"):
+                    metric_key_prefix = re.sub(r"_loss$", "", k)
+            _ = metrics.pop("total_flos", None)
+            _ = metrics.pop("epoch", None)
+            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
+            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
+            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
+            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
+            for k, v in metrics.items():
+                splits = k.split("_")
+                name = " ".join([part.capitalize() for part in splits[1:]])
+                if name == "Loss":
+                    # Single dataset
+                    name = "Validation Loss"
+                values[name] = v
+            self.training_tracker.write_line(values)
+            self.training_tracker.remove_child()
+            self.prediction_bar = None
+            # Evaluation takes a long time so we should force the next update.
+            self._force_next_update = True
+
+    def on_train_end(self, args, state, control, **kwargs):
+        self.training_tracker.update(
+            state.global_step,
+            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
+            force_update=True,
+        )
+        self.training_tracker = None
diff --git a/modified/utils/peft_utils.py b/modified/utils/peft_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7830acd0b4d29025837c54de19aa3d68ee295ddf
--- /dev/null
+++ b/modified/utils/peft_utils.py
@@ -0,0 +1,123 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+import os
+from typing import Dict, Optional, Union
+
+from packaging import version
+
+from .hub import cached_file
+from .import_utils import is_peft_available
+
+
+ADAPTER_CONFIG_NAME = "adapter_config.json"
+ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
+ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
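The helper defined just below checks whether a checkpoint is a PEFT adapter by looking for `adapter_config.json`, either in a local directory or on the Hub. A hedged usage sketch; the repo id is a placeholder, not taken from this diff:

```python
# Hypothetical caller: decide whether "some-user/some-adapter" is a PEFT adapter
# checkpoint or a full model before loading it.
adapter_config = find_adapter_config_file("some-user/some-adapter")
if adapter_config is not None:
    print(f"PEFT adapter detected, config at: {adapter_config}")
else:
    print("No adapter_config.json found; treat the checkpoint as a full model.")
```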
+
+
+def find_adapter_config_file(
+    model_id: str,
+    cache_dir: Optional[Union[str, os.PathLike]] = None,
+    force_download: bool = False,
+    resume_download: bool = False,
+    proxies: Optional[Dict[str, str]] = None,
+    token: Optional[Union[bool, str]] = None,
+    revision: Optional[str] = None,
+    local_files_only: bool = False,
+    subfolder: str = "",
+    _commit_hash: Optional[str] = None,
+) -> Optional[str]:
+    r"""
+    Checks whether the model stored on the Hub or locally is an adapter model, and returns the path of the adapter
+    config file if it is, `None` otherwise.
+
+    Args:
+        model_id (`str`):
+            The identifier of the model to look for; can be either a local path or an id to the repository on the
+            Hub.
+        cache_dir (`str` or `os.PathLike`, *optional*):
+            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+            cache should not be used.
+        force_download (`bool`, *optional*, defaults to `False`):
+            Whether or not to force to (re-)download the configuration files and override the cached versions if they
+            exist.
+        resume_download (`bool`, *optional*, defaults to `False`):
+            Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
+            exists.
+        proxies (`Dict[str, str]`, *optional*):
+            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+        token (`str` or *bool*, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+            when running `huggingface-cli login` (stored in `~/.huggingface`).
+        revision (`str`, *optional*, defaults to `"main"`):
+            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+            identifier allowed by git.
+
+            <Tip>
+
+            To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
+
+            </Tip>
+
+        local_files_only (`bool`, *optional*, defaults to `False`):
+            If `True`, will only try to load the adapter configuration from local files.
+        subfolder (`str`, *optional*, defaults to `""`):
+            In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+            specify the folder name here.
+    """
+    adapter_cached_filename = None
+    if model_id is None:
+        return None
+    elif os.path.isdir(model_id):
+        list_local_files = os.listdir(model_id)
+        if ADAPTER_CONFIG_NAME in list_local_files:
+            adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME)
+    else:
+        adapter_cached_filename = cached_file(
+            model_id,
+            ADAPTER_CONFIG_NAME,
+            cache_dir=cache_dir,
+            force_download=force_download,
+            resume_download=resume_download,
+            proxies=proxies,
+            token=token,
+            revision=revision,
+            local_files_only=local_files_only,
+            subfolder=subfolder,
+            _commit_hash=_commit_hash,
+            _raise_exceptions_for_missing_entries=False,
+            _raise_exceptions_for_connection_errors=False,
+        )
+
+    return adapter_cached_filename
+
+
+def check_peft_version(min_version: str) -> None:
+    r"""
+    Checks if the version of PEFT is compatible.
+
+    Args:
+        min_version (`str`):
+            The minimum version of PEFT to check against.
+    """
+    if not is_peft_available():
+        raise ValueError("PEFT is not installed. Please install it with `pip install peft`")
+
+    is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version)
+
+    if not is_peft_version_compatible:
+        raise ValueError(
+            f"The version of PEFT you are using is not compatible, please use a version that is greater"
+            f" than or equal to {min_version}"
+        )
diff --git a/modified/utils/quantization_config.py b/modified/utils/quantization_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f268ab6bc7102e4254a11e5fd4db240a409903d
--- /dev/null
+++ b/modified/utils/quantization_config.py
@@ -0,0 +1,645 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import importlib.metadata +import json +import os +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from packaging import version + +from ..utils import is_auto_awq_available, is_torch_available, logging + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class QuantizationMethod(str, Enum): + BITS_AND_BYTES = "bitsandbytes" + GPTQ = "gptq" + AWQ = "awq" + + +class AWQLinearVersion(str, Enum): + GEMM = "gemm" + GEMV = "gemv" + + @staticmethod + def from_str(version: str): + version = version.lower() + if version == "gemm": + return AWQLinearVersion.GEMM + elif version == "gemv": + return AWQLinearVersion.GEMV + else: + raise ValueError(f"Unknown AWQLinearVersion {version}") + + +class AwqBackendPackingMethod(str, Enum): + AUTOAWQ = "autoawq" + LLMAWQ = "llm-awq" + + +@dataclass +class QuantizationConfigMixin: + """ + Mixin class for quantization config + """ + + quant_method: QuantizationMethod + + @classmethod + def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs): + """ + Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters. + + Args: + config_dict (`Dict[str, Any]`): + Dictionary that will be used to instantiate the configuration object. + return_unused_kwargs (`bool`,*optional*, defaults to `False`): + Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in + `PreTrainedModel`. + kwargs (`Dict[str, Any]`): + Additional parameters from which to initialize the configuration object. + + Returns: + [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters. + """ + + config = cls(**config_dict) + + to_remove = [] + for key, value in kwargs.items(): + if hasattr(config, key): + setattr(config, key, value) + to_remove.append(key) + for key in to_remove: + kwargs.pop(key, None) + + if return_unused_kwargs: + return config, kwargs + else: + return config + + def to_json_file(self, json_file_path: Union[str, os.PathLike]): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this configuration instance's parameters will be saved. + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default + `QuantizationConfig()` is serialized to JSON file. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + config_dict = self.to_dict() + json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + writer.write(json_string) + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + return copy.deepcopy(self.__dict__) + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string()}" + + def to_json_string(self, use_diff: bool = True) -> str: + """ + Serializes this instance to a JSON string. 
+
+        Args:
+            use_diff (`bool`, *optional*, defaults to `True`):
+                If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
+                is serialized to JSON string.
+
+        Returns:
+            `str`: String containing all the attributes that make up this configuration instance in JSON format.
+        """
+        if use_diff is True:
+            config_dict = self.to_diff_dict()
+        else:
+            config_dict = self.to_dict()
+        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
+
+
+@dataclass
+class BitsAndBytesConfig(QuantizationConfigMixin):
+    """
+    This is a wrapper class about all possible attributes and features that you can play with a model that has been
+    loaded using `bitsandbytes`.
+
+    This replaces `load_in_8bit` or `load_in_4bit`, therefore both options are mutually exclusive.
+
+    Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`,
+    then more arguments will be added to this class.
+
+    Args:
+        load_in_8bit (`bool`, *optional*, defaults to `False`):
+            This flag is used to enable 8-bit quantization with LLM.int8().
+        load_in_4bit (`bool`, *optional*, defaults to `False`):
+            This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from
+            `bitsandbytes`.
+        llm_int8_threshold (`float`, *optional*, defaults to 6.0):
+            This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix
+            Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value
+            that is above this threshold will be considered an outlier and the operation on those values will be done
+            in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but
+            there are some exceptional systematic outliers that are very differently distributed for large models.
+            These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of
+            magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6,
+            but a lower threshold might be needed for more unstable models (small models, fine-tuning).
+        llm_int8_skip_modules (`List[str]`, *optional*):
+            An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as
+            Jukebox that has several heads in different places and not necessarily at the last position. For example
+            for `CausalLM` models, the last `lm_head` is kept in its original `dtype`.
+        llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`):
+            This flag is used for advanced use cases and users that are aware of this feature. If you want to split
+            your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use
+            this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8
+            operations will not be run on CPU.
+        llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`):
+            This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not
+            have to be converted back and forth for the backward pass.
+        bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`):
+            This sets the computational type, which might be different than the input type. For example, inputs might
+            be fp32, but computation can be set to bf16 for speedups.
+ bnb_4bit_quant_type (`str`, *optional*, defaults to `"fp4"`): + This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types + which are specified by `fp4` or `nf4`. + bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`): + This flag is used for nested quantization where the quantization constants from the first quantization are + quantized again. + kwargs (`Dict[str, Any]`, *optional*): + Additional parameters from which to initialize the configuration object. + """ + + def __init__( + self, + load_in_8bit=False, + load_in_4bit=False, + llm_int8_threshold=6.0, + llm_int8_skip_modules=None, + llm_int8_enable_fp32_cpu_offload=False, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=None, + bnb_4bit_quant_type="fp4", + bnb_4bit_use_double_quant=False, + **kwargs, + ): + self.quant_method = QuantizationMethod.BITS_AND_BYTES + self.load_in_8bit = load_in_8bit + self.load_in_4bit = load_in_4bit + self.llm_int8_threshold = llm_int8_threshold + self.llm_int8_skip_modules = llm_int8_skip_modules + self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload + self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight + self.bnb_4bit_quant_type = bnb_4bit_quant_type + self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant + + if bnb_4bit_compute_dtype is None: + self.bnb_4bit_compute_dtype = torch.float32 + elif isinstance(bnb_4bit_compute_dtype, str): + self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype) + elif isinstance(bnb_4bit_compute_dtype, torch.dtype): + self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype + else: + raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") + + self.post_init() + + def post_init(self): + r""" + Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. + """ + if not isinstance(self.llm_int8_threshold, float): + raise ValueError("llm_int8_threshold must be a float") + + if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): + raise ValueError("llm_int8_skip_modules must be a list of strings") + if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): + raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") + + if not isinstance(self.llm_int8_has_fp16_weight, bool): + raise ValueError("llm_int8_has_fp16_weight must be a boolean") + + if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): + raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") + + if not isinstance(self.bnb_4bit_quant_type, str): + raise ValueError("bnb_4bit_quant_type must be a string") + + if not isinstance(self.bnb_4bit_use_double_quant, bool): + raise ValueError("bnb_4bit_use_double_quant must be a boolean") + + if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( + "0.39.0" + ): + raise ValueError( + "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" + ) + + def is_quantizable(self): + r""" + Returns `True` if the model is quantizable, `False` otherwise. + """ + return self.load_in_8bit or self.load_in_4bit + + def quantization_method(self): + r""" + This method returns the quantization method used for the model. If the model is not quantizable, it returns + `None`. 
+ """ + if self.load_in_8bit: + return "llm_int8" + elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4": + return "fp4" + elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4": + return "nf4" + else: + return None + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] + + return output + + def __repr__(self): + config_dict = self.to_dict() + return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" + + def to_diff_dict(self) -> Dict[str, Any]: + """ + Removes all attributes from config which correspond to the default config attributes for better readability and + serializes to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, + """ + config_dict = self.to_dict() + + # get the default config dict + default_config_dict = BitsAndBytesConfig().to_dict() + + serializable_config_dict = {} + + # only serialize values that differ from the default config + for key, value in config_dict.items(): + if value != default_config_dict[key]: + serializable_config_dict[key] = value + + return serializable_config_dict + + +class ExllamaVersion(int, Enum): + ONE = 1 + TWO = 2 + + +@dataclass +class GPTQConfig(QuantizationConfigMixin): + """ + This is a wrapper class about all possible attributes and features that you can play with a model that has been + loaded using `optimum` api for gptq quantization relying on auto_gptq backend. + + Args: + bits (`int`): + The number of bits to quantize to, supported numbers are (2, 3, 4, 8). + tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*): + The tokenizer used to process the dataset. You can pass either: + - A custom tokenizer object. + - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. + Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + user or organization name, like `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved + using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + dataset (`Union[List[str]]`, *optional*): + The dataset used for quantization. You can provide your own dataset in a list of string or just use the + original datasets used in GPTQ paper ['wikitext2','c4','c4-new','ptb','ptb-new'] + group_size (`int`, *optional*, defaults to 128): + The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. + damp_percent (`float`, *optional*, defaults to 0.1): + The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1. + desc_act (`bool`, *optional*, defaults to `False`): + Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly + speed up inference but the perplexity may become slightly worse. Also known as act-order. + sym (`bool`, *optional*, defaults to `True`): + Whether to use symetric quantization. + true_sequential (`bool`, *optional*, defaults to `True`): + Whether to perform sequential quantization even within a single Transformer block. 
+
+
+@dataclass
+class GPTQConfig(QuantizationConfigMixin):
+    """
+    This is a wrapper class about all possible attributes and features that you can play with a model that has been
+    loaded using the `optimum` api for gptq quantization relying on the auto_gptq backend.
+
+    Args:
+        bits (`int`):
+            The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
+        tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*):
+            The tokenizer used to process the dataset. You can pass either:
+                - A custom tokenizer object.
+                - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
+                  Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
+                  user or organization name, like `dbmdz/bert-base-german-cased`.
+                - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
+                  using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
+        dataset (`Union[List[str], str]`, *optional*):
+            The dataset used for quantization. You can provide your own dataset in a list of string or just use the
+            original datasets used in the GPTQ paper ['wikitext2','c4','c4-new','ptb','ptb-new'].
+        group_size (`int`, *optional*, defaults to 128):
+            The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization.
+        damp_percent (`float`, *optional*, defaults to 0.1):
+            The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1.
+        desc_act (`bool`, *optional*, defaults to `False`):
+            Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly
+            speed up inference but the perplexity may become slightly worse. Also known as act-order.
+        sym (`bool`, *optional*, defaults to `True`):
+            Whether to use symmetric quantization.
+        true_sequential (`bool`, *optional*, defaults to `True`):
+            Whether to perform sequential quantization even within a single Transformer block. Instead of quantizing
+            the entire block at once, we perform layer-wise quantization. As a result, each layer undergoes
+            quantization using inputs that have passed through the previously quantized layers.
+        use_cuda_fp16 (`bool`, *optional*, defaults to `False`):
+            Whether or not to use the optimized CUDA kernel for fp16 models. Requires the model to be in fp16.
+        model_seqlen (`int`, *optional*):
+            The maximum sequence length that the model can take.
+        block_name_to_quantize (`str`, *optional*):
+            The transformers block name to quantize.
+        module_name_preceding_first_block (`List[str]`, *optional*):
+            The layers that are preceding the first Transformer block.
+        batch_size (`int`, *optional*, defaults to 1):
+            The batch size used when processing the dataset.
+        pad_token_id (`int`, *optional*):
+            The pad token id. Needed to prepare the dataset when `batch_size` > 1.
+        use_exllama (`bool`, *optional*):
+            Whether to use the exllama backend. Defaults to `True` if unset. Only works with `bits` = 4.
+        max_input_length (`int`, *optional*):
+            The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input
+            length. It is specific to the exllama backend with act-order.
+        exllama_config (`Dict[str, Any]`, *optional*):
+            The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults
+            to `{"version": 1}` if unset.
+        cache_block_outputs (`bool`, *optional*, defaults to `True`):
+            Whether to cache block outputs to reuse as inputs for the succeeding block.
+    """
+
+    def __init__(
+        self,
+        bits: int,
+        tokenizer: Any = None,
+        dataset: Optional[Union[List[str], str]] = None,
+        group_size: int = 128,
+        damp_percent: float = 0.1,
+        desc_act: bool = False,
+        sym: bool = True,
+        true_sequential: bool = True,
+        use_cuda_fp16: bool = False,
+        model_seqlen: Optional[int] = None,
+        block_name_to_quantize: Optional[str] = None,
+        module_name_preceding_first_block: Optional[List[str]] = None,
+        batch_size: int = 1,
+        pad_token_id: Optional[int] = None,
+        use_exllama: Optional[bool] = None,
+        max_input_length: Optional[int] = None,
+        exllama_config: Optional[Dict[str, Any]] = None,
+        cache_block_outputs: bool = True,
+        **kwargs,
+    ):
+        self.quant_method = QuantizationMethod.GPTQ
+        self.bits = bits
+        self.tokenizer = tokenizer
+        self.dataset = dataset
+        self.group_size = group_size
+        self.damp_percent = damp_percent
+        self.desc_act = desc_act
+        self.sym = sym
+        self.true_sequential = true_sequential
+        self.use_cuda_fp16 = use_cuda_fp16
+        self.model_seqlen = model_seqlen
+        self.block_name_to_quantize = block_name_to_quantize
+        self.module_name_preceding_first_block = module_name_preceding_first_block
+        self.batch_size = batch_size
+        self.pad_token_id = pad_token_id
+        self.use_exllama = use_exllama
+        self.max_input_length = max_input_length
+        self.exllama_config = exllama_config
+        self.disable_exllama = kwargs.pop("disable_exllama", None)
+        self.cache_block_outputs = cache_block_outputs
+        self.post_init()
+
+    def get_loading_attributes(self):
+        attributes_dict = copy.deepcopy(self.__dict__)
+        loading_attributes = ["disable_exllama", "use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length"]
+        loading_attributes_dict = {i: j for i, j in attributes_dict.items() if i in loading_attributes}
+        return loading_attributes_dict
+
+    def post_init(self):
+        r"""
+        Safety checker that arguments are correct.
+        """
+        if self.bits not in [2, 3, 4, 8]:
+            raise ValueError(f"Only supports quantization to [2, 3, 4, 8] bits but found {self.bits}")
!= -1 and self.group_size <= 0: + raise ValueError("group_size must be greater than 0 or equal to -1") + if not (0 < self.damp_percent < 1): + raise ValueError("damp_percent must between 0 and 1.") + if self.dataset is not None: + if isinstance(self.dataset, str): + if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: + raise ValueError( + f"""You have entered a string value for dataset. You can only choose between + ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" + ) + elif not isinstance(self.dataset, list): + raise ValueError( + f"""dataset needs to be either a list of string or a value in + ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" + ) + + if self.disable_exllama is None and self.use_exllama is None: + # New default behaviour + self.use_exllama = True + elif self.disable_exllama is not None and self.use_exllama is None: + # Follow pattern of old config + logger.warning( + "Using `disable_exllama` is deprecated and will be removed in version 4.37. Use `use_exllama` instead and specify the version with `exllama_config`." + "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file." + ) + self.use_exllama = not self.disable_exllama + self.disable_exllama = None + elif self.disable_exllama is not None and self.use_exllama is not None: + # Only happens if user explicitly passes in both arguments + raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. Please use just `use_exllama`") + + if self.exllama_config is None: + self.exllama_config = {"version": ExllamaVersion.ONE} + else: + if "version" not in self.exllama_config: + raise ValueError("`exllama_config` needs to have a `version` key.") + elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: + exllama_version = self.exllama_config["version"] + raise ValueError( + f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" + ) + + if self.bits == 4 and self.use_exllama: + if self.exllama_config["version"] == ExllamaVersion.ONE: + logger.info( + "You have activated exllama backend. Note that you can get better inference " + "speed using exllamav2 kernel by setting `exllama_config`." + ) + elif self.exllama_config["version"] == ExllamaVersion.TWO: + optimum_version = version.parse(importlib.metadata.version("optimum")) + autogptq_version = version.parse(importlib.metadata.version("auto_gptq")) + if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"): + raise ValueError( + f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . 
+
+    def to_dict(self):
+        config_dict = super().to_dict()
+        config_dict.pop("disable_exllama", None)
+        return config_dict
+
+    def to_dict_optimum(self):
+        """
+        Get a compatible dict for the optimum gptq config.
+        """
+        quant_dict = self.to_dict()
+        # make it compatible with the optimum config
+        quant_dict["disable_exllama"] = not self.use_exllama
+        return quant_dict
+
+    @classmethod
+    def from_dict_optimum(cls, config_dict):
+        """
+        Get a compatible class from an optimum gptq config dict.
+        """
+
+        if "disable_exllama" in config_dict:
+            config_dict["use_exllama"] = not config_dict["disable_exllama"]
+            # switch to None to not trigger the warning
+            config_dict["disable_exllama"] = None
+
+        config = cls(**config_dict)
+        return config
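For orientation (this is an editorial sketch, not part of the diff): a config like the `GPTQConfig` above is typically passed to `from_pretrained`. The checkpoint name and calibration dataset below are placeholders; `exllama_config={"version": 2}` selects the exllamav2 kernel whose version requirements `post_init` checks above.

```python
# Illustrative sketch only: quantizing a small model with GPTQConfig.
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")  # placeholder checkpoint
quantization_config = GPTQConfig(
    bits=4,                         # must be one of [2, 3, 4, 8]
    dataset="c4",                   # or a list of calibration strings
    tokenizer=tokenizer,            # needed to tokenize the calibration dataset
    group_size=128,
    exllama_config={"version": 2},  # pick the exllamav2 kernel for 4-bit inference
)

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",
    device_map="auto",
    quantization_config=quantization_config,
)
```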
+ """ + + def __init__( + self, + bits: int = 4, + group_size: int = 128, + zero_point: bool = True, + version: AWQLinearVersion = AWQLinearVersion.GEMM, + backend: AwqBackendPackingMethod = AwqBackendPackingMethod.AUTOAWQ, + do_fuse: Optional[bool] = None, + fuse_max_seq_len: Optional[int] = None, + modules_to_fuse: Optional[dict] = None, + **kwargs, + ): + self.quant_method = QuantizationMethod.AWQ + + self.bits = bits + self.group_size = group_size + self.zero_point = zero_point + self.version = version + self.backend = backend + self.fuse_max_seq_len = fuse_max_seq_len + + self.modules_to_fuse = modules_to_fuse + if do_fuse is None: + self.do_fuse = modules_to_fuse is not None and len(modules_to_fuse) > 0 + else: + self.do_fuse = do_fuse + self.fuse_max_seq_len = fuse_max_seq_len + + self.post_init() + + def post_init(self): + r""" + Safety checker that arguments are correct + """ + if not torch.cuda.is_available(): + raise ValueError("AWQ is only available on GPU") + + if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]: + raise ValueError( + f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}" + ) + + self.version = AWQLinearVersion.from_str(self.version) + if self.version not in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV]: + raise ValueError( + f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV] - not recognized version {self.version}" + ) + + if self.backend == AwqBackendPackingMethod.LLMAWQ: + compute_capability = torch.cuda.get_device_capability() + major, minor = compute_capability + if major < 8: + raise ValueError("LLM-AWQ backend is only supported on GPUs with compute capability >= 8.0") + + if self.do_fuse and self.fuse_max_seq_len is None: + raise ValueError( + "You cannot enable fused modules without specifying a `fuse_max_seq_len`, make sure to pass a valid `fuse_max_seq_len` for your usecase" + ) + + if self.do_fuse: + awq_version_supports_fusing = False + MIN_AWQ_VERSION = "0.1.7" + if is_auto_awq_available(): + awq_version_supports_fusing = version.parse(importlib.metadata.version("autoawq")) >= version.parse( + MIN_AWQ_VERSION + ) + + if not awq_version_supports_fusing: + raise ValueError( + f"You current version of `autoawq` does not support module fusing, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}." + ) + + if self.do_fuse and self.modules_to_fuse is not None: + required_keys = [ + "hidden_size", + "num_attention_heads", + "num_key_value_heads", + "mlp", + "attention", + "layernorm", + "use_alibi", + ] + if not all(key in self.modules_to_fuse for key in required_keys): + raise ValueError( + f"Required fields are missing in the fusing mapping, required fields are {required_keys}" + ) + + def get_loading_attributes(self): + attibutes_dict = copy.deepcopy(self.__dict__) + loading_attibutes = ["do_fuse", "modules_to_fuse", "fuse_max_seq_len"] + loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} + return loading_attibutes_dict diff --git a/modified/utils/sentencepiece_model_pb2.py b/modified/utils/sentencepiece_model_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b2992a6308c9f98f68486998d57d00bbdc1e34 --- /dev/null +++ b/modified/utils/sentencepiece_model_pb2.py @@ -0,0 +1,1511 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: sentencepiece_model.proto + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="sentencepiece_model.proto", + package="sentencepiece", + syntax="proto2", + serialized_options=b"H\003", + create_key=_descriptor._internal_create_key, + serialized_pb=( + b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\xa1\n\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01' + b" \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02" + b" \x01(\t\x12\x41\n\nmodel_type\x18\x03" + b" \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04" + b" \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12" + b' \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n' + b" \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b" + b" \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12" + b' \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r' + b" \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e" + b" \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f" + b" \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12" + b" \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10" + b" \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11" + b" \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14" + b" \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15" + b" \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17" + b" \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16" + b" \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18" + b" \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19" + b" \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e" + b" \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$" + b" \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18" + b' \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18"' + b" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18)" + b" \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+" + b" \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05\x12\x16\n\tbos_piece\x18." 
+ b" \x01(\t:\x03\x12\x17\n\teos_piece\x18/ \x01(\t:\x04\x12\x18\n\tpad_piece\x18\x30" + b" \x01(\t:\x05\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87" + b" \x12+\n\x1ctrain_extremely_large_corpus\x18\x31" + b' \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01' + b" \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03" + b" \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12" + b" \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06" + b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01' + b' \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01' + b" \x01(\t\x12\x10\n\x08\x65xpected\x18\x02" + b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01' + b" \x03(\x0b\x32'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02" + b" \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03" + b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04" + b" \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05" + b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01" + b" \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03" + b' \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' + ), +) + + +_TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor( + name="ModelType", + full_name="sentencepiece.TrainerSpec.ModelType", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="UNIGRAM", + index=0, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="BPE", + index=1, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="WORD", + index=2, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CHAR", + index=3, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=1294, + serialized_end=1347, +) +_sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE) + +_MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor( + name="Type", + full_name="sentencepiece.ModelProto.SentencePiece.Type", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="NORMAL", + index=0, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="UNKNOWN", + index=1, + 
number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CONTROL", + index=2, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="USER_DEFINED", + index=3, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="BYTE", + index=4, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="UNUSED", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=2100, + serialized_end=2184, +) +_sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE) + + +_TRAINERSPEC = _descriptor.Descriptor( + name="TrainerSpec", + full_name="sentencepiece.TrainerSpec", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="input", + full_name="sentencepiece.TrainerSpec.input", + index=0, + number=1, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="input_format", + full_name="sentencepiece.TrainerSpec.input_format", + index=1, + number=7, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="model_prefix", + full_name="sentencepiece.TrainerSpec.model_prefix", + index=2, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="model_type", + full_name="sentencepiece.TrainerSpec.model_type", + index=3, + number=3, + type=14, + cpp_type=8, + label=1, + has_default_value=True, + default_value=1, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="vocab_size", + full_name="sentencepiece.TrainerSpec.vocab_size", + index=4, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=8000, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="accept_language", + full_name="sentencepiece.TrainerSpec.accept_language", + index=5, + number=5, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="self_test_sample_size", + full_name="sentencepiece.TrainerSpec.self_test_sample_size", + index=6, + number=6, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="character_coverage", + full_name="sentencepiece.TrainerSpec.character_coverage", + index=7, + number=10, + type=2, + cpp_type=6, + label=1, + has_default_value=True, + default_value=float(0.9995), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="input_sentence_size", + full_name="sentencepiece.TrainerSpec.input_sentence_size", + index=8, + number=11, + type=4, + cpp_type=4, + label=1, + has_default_value=True, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="shuffle_input_sentence", + full_name="sentencepiece.TrainerSpec.shuffle_input_sentence", + index=9, + number=19, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="mining_sentence_size", + full_name="sentencepiece.TrainerSpec.mining_sentence_size", + index=10, + number=12, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\030\001", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="training_sentence_size", + full_name="sentencepiece.TrainerSpec.training_sentence_size", + index=11, + number=13, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\030\001", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="seed_sentencepiece_size", + full_name="sentencepiece.TrainerSpec.seed_sentencepiece_size", + index=12, + number=14, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=1000000, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="shrinking_factor", + full_name="sentencepiece.TrainerSpec.shrinking_factor", + index=13, + number=15, + type=2, + cpp_type=6, + label=1, + has_default_value=True, + default_value=float(0.75), + message_type=None, + enum_type=None, + containing_type=None, 
+ is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="max_sentence_length", + full_name="sentencepiece.TrainerSpec.max_sentence_length", + index=14, + number=18, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=4192, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="num_threads", + full_name="sentencepiece.TrainerSpec.num_threads", + index=15, + number=16, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=16, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="num_sub_iterations", + full_name="sentencepiece.TrainerSpec.num_sub_iterations", + index=16, + number=17, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=2, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="max_sentencepiece_length", + full_name="sentencepiece.TrainerSpec.max_sentencepiece_length", + index=17, + number=20, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=16, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="split_by_unicode_script", + full_name="sentencepiece.TrainerSpec.split_by_unicode_script", + index=18, + number=21, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="split_by_number", + full_name="sentencepiece.TrainerSpec.split_by_number", + index=19, + number=23, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="split_by_whitespace", + full_name="sentencepiece.TrainerSpec.split_by_whitespace", + index=20, + number=22, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="treat_whitespace_as_suffix", + full_name="sentencepiece.TrainerSpec.treat_whitespace_as_suffix", + index=21, + number=24, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + 
serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="split_digits", + full_name="sentencepiece.TrainerSpec.split_digits", + index=22, + number=25, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="control_symbols", + full_name="sentencepiece.TrainerSpec.control_symbols", + index=23, + number=30, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="user_defined_symbols", + full_name="sentencepiece.TrainerSpec.user_defined_symbols", + index=24, + number=31, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="required_chars", + full_name="sentencepiece.TrainerSpec.required_chars", + index=25, + number=36, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="byte_fallback", + full_name="sentencepiece.TrainerSpec.byte_fallback", + index=26, + number=35, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="vocabulary_output_piece_score", + full_name="sentencepiece.TrainerSpec.vocabulary_output_piece_score", + index=27, + number=32, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="hard_vocab_limit", + full_name="sentencepiece.TrainerSpec.hard_vocab_limit", + index=28, + number=33, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="use_all_vocab", + full_name="sentencepiece.TrainerSpec.use_all_vocab", + index=29, + number=34, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="unk_id", + full_name="sentencepiece.TrainerSpec.unk_id", + index=30, + number=40, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="bos_id", + full_name="sentencepiece.TrainerSpec.bos_id", + index=31, + number=41, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=1, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="eos_id", + full_name="sentencepiece.TrainerSpec.eos_id", + index=32, + number=42, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=2, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="pad_id", + full_name="sentencepiece.TrainerSpec.pad_id", + index=33, + number=43, + type=5, + cpp_type=1, + label=1, + has_default_value=True, + default_value=-1, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="unk_piece", + full_name="sentencepiece.TrainerSpec.unk_piece", + index=34, + number=45, + type=9, + cpp_type=9, + label=1, + has_default_value=True, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="bos_piece", + full_name="sentencepiece.TrainerSpec.bos_piece", + index=35, + number=46, + type=9, + cpp_type=9, + label=1, + has_default_value=True, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="eos_piece", + full_name="sentencepiece.TrainerSpec.eos_piece", + index=36, + number=47, + type=9, + cpp_type=9, + label=1, + has_default_value=True, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="pad_piece", + full_name="sentencepiece.TrainerSpec.pad_piece", + index=37, + number=48, + type=9, + cpp_type=9, + label=1, + has_default_value=True, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="unk_surface", + full_name="sentencepiece.TrainerSpec.unk_surface", + index=38, + number=44, + type=9, + 
cpp_type=9, + label=1, + has_default_value=True, + default_value=b" \342\201\207 ".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="train_extremely_large_corpus", + full_name="sentencepiece.TrainerSpec.train_extremely_large_corpus", + index=39, + number=49, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[ + _TRAINERSPEC_MODELTYPE, + ], + serialized_options=None, + is_extendable=True, + syntax="proto2", + extension_ranges=[ + (200, 536870912), + ], + oneofs=[], + serialized_start=45, + serialized_end=1358, +) + + +_NORMALIZERSPEC = _descriptor.Descriptor( + name="NormalizerSpec", + full_name="sentencepiece.NormalizerSpec", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="sentencepiece.NormalizerSpec.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="precompiled_charsmap", + full_name="sentencepiece.NormalizerSpec.precompiled_charsmap", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="add_dummy_prefix", + full_name="sentencepiece.NormalizerSpec.add_dummy_prefix", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="remove_extra_whitespaces", + full_name="sentencepiece.NormalizerSpec.remove_extra_whitespaces", + index=3, + number=4, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="escape_whitespaces", + full_name="sentencepiece.NormalizerSpec.escape_whitespaces", + index=4, + number=5, + type=8, + cpp_type=7, + label=1, + has_default_value=True, + default_value=True, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="normalization_rule_tsv", + 
full_name="sentencepiece.NormalizerSpec.normalization_rule_tsv", + index=5, + number=6, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=True, + syntax="proto2", + extension_ranges=[ + (200, 536870912), + ], + oneofs=[], + serialized_start=1361, + serialized_end=1570, +) + + +_SELFTESTDATA_SAMPLE = _descriptor.Descriptor( + name="Sample", + full_name="sentencepiece.SelfTestData.Sample", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="input", + full_name="sentencepiece.SelfTestData.Sample.input", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="expected", + full_name="sentencepiece.SelfTestData.Sample.expected", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto2", + extension_ranges=[], + oneofs=[], + serialized_start=1641, + serialized_end=1682, +) + +_SELFTESTDATA = _descriptor.Descriptor( + name="SelfTestData", + full_name="sentencepiece.SelfTestData", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="samples", + full_name="sentencepiece.SelfTestData.samples", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[ + _SELFTESTDATA_SAMPLE, + ], + enum_types=[], + serialized_options=None, + is_extendable=True, + syntax="proto2", + extension_ranges=[ + (200, 536870912), + ], + oneofs=[], + serialized_start=1572, + serialized_end=1693, +) + + +_MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor( + name="SentencePiece", + full_name="sentencepiece.ModelProto.SentencePiece", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="piece", + full_name="sentencepiece.ModelProto.SentencePiece.piece", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + 
_descriptor.FieldDescriptor( + name="score", + full_name="sentencepiece.ModelProto.SentencePiece.score", + index=1, + number=2, + type=2, + cpp_type=6, + label=1, + has_default_value=False, + default_value=float(0), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="type", + full_name="sentencepiece.ModelProto.SentencePiece.type", + index=2, + number=3, + type=14, + cpp_type=8, + label=1, + has_default_value=True, + default_value=1, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[ + _MODELPROTO_SENTENCEPIECE_TYPE, + ], + serialized_options=None, + is_extendable=True, + syntax="proto2", + extension_ranges=[ + (200, 536870912), + ], + oneofs=[], + serialized_start=1985, + serialized_end=2195, +) + +_MODELPROTO = _descriptor.Descriptor( + name="ModelProto", + full_name="sentencepiece.ModelProto", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="pieces", + full_name="sentencepiece.ModelProto.pieces", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="trainer_spec", + full_name="sentencepiece.ModelProto.trainer_spec", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="normalizer_spec", + full_name="sentencepiece.ModelProto.normalizer_spec", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="self_test_data", + full_name="sentencepiece.ModelProto.self_test_data", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="denormalizer_spec", + full_name="sentencepiece.ModelProto.denormalizer_spec", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[ + _MODELPROTO_SENTENCEPIECE, + ], + enum_types=[], + 
serialized_options=None, + is_extendable=True, + syntax="proto2", + extension_ranges=[ + (200, 536870912), + ], + oneofs=[], + serialized_start=1696, + serialized_end=2206, +) + +_TRAINERSPEC.fields_by_name["model_type"].enum_type = _TRAINERSPEC_MODELTYPE +_TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC +_SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA +_SELFTESTDATA.fields_by_name["samples"].message_type = _SELFTESTDATA_SAMPLE +_MODELPROTO_SENTENCEPIECE.fields_by_name["type"].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE +_MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO +_MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE +_MODELPROTO.fields_by_name["pieces"].message_type = _MODELPROTO_SENTENCEPIECE +_MODELPROTO.fields_by_name["trainer_spec"].message_type = _TRAINERSPEC +_MODELPROTO.fields_by_name["normalizer_spec"].message_type = _NORMALIZERSPEC +_MODELPROTO.fields_by_name["self_test_data"].message_type = _SELFTESTDATA +_MODELPROTO.fields_by_name["denormalizer_spec"].message_type = _NORMALIZERSPEC +DESCRIPTOR.message_types_by_name["TrainerSpec"] = _TRAINERSPEC +DESCRIPTOR.message_types_by_name["NormalizerSpec"] = _NORMALIZERSPEC +DESCRIPTOR.message_types_by_name["SelfTestData"] = _SELFTESTDATA +DESCRIPTOR.message_types_by_name["ModelProto"] = _MODELPROTO +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +TrainerSpec = _reflection.GeneratedProtocolMessageType( + "TrainerSpec", + (_message.Message,), + { + "DESCRIPTOR": _TRAINERSPEC, + "__module__": "sentencepiece_model_pb2", + # @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec) + }, +) +_sym_db.RegisterMessage(TrainerSpec) + +NormalizerSpec = _reflection.GeneratedProtocolMessageType( + "NormalizerSpec", + (_message.Message,), + { + "DESCRIPTOR": _NORMALIZERSPEC, + "__module__": "sentencepiece_model_pb2", + # @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec) + }, +) +_sym_db.RegisterMessage(NormalizerSpec) + +SelfTestData = _reflection.GeneratedProtocolMessageType( + "SelfTestData", + (_message.Message,), + { + "Sample": _reflection.GeneratedProtocolMessageType( + "Sample", + (_message.Message,), + { + "DESCRIPTOR": _SELFTESTDATA_SAMPLE, + "__module__": "sentencepiece_model_pb2", + # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample) + }, + ), + "DESCRIPTOR": _SELFTESTDATA, + "__module__": "sentencepiece_model_pb2", + # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData) + }, +) +_sym_db.RegisterMessage(SelfTestData) +_sym_db.RegisterMessage(SelfTestData.Sample) + +ModelProto = _reflection.GeneratedProtocolMessageType( + "ModelProto", + (_message.Message,), + { + "SentencePiece": _reflection.GeneratedProtocolMessageType( + "SentencePiece", + (_message.Message,), + { + "DESCRIPTOR": _MODELPROTO_SENTENCEPIECE, + "__module__": "sentencepiece_model_pb2", + # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece) + }, + ), + "DESCRIPTOR": _MODELPROTO, + "__module__": "sentencepiece_model_pb2", + # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto) + }, +) +_sym_db.RegisterMessage(ModelProto) +_sym_db.RegisterMessage(ModelProto.SentencePiece) + + +DESCRIPTOR._options = None +_TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None +_TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None +# @@protoc_insertion_point(module_scope) diff --git a/modified/utils/sentencepiece_model_pb2_new.py b/modified/utils/sentencepiece_model_pb2_new.py new file mode 100644 index 
0000000000000000000000000000000000000000..4a2e29b1bdc308c4522e7ae283a10bfa1749991e --- /dev/null +++ b/modified/utils/sentencepiece_model_pb2_new.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: sentencepiece_model.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03\x12\x17\n\teos_piece\x18/ \x01(\t:\x04\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' +) + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) +if _descriptor._USE_C_DESCRIPTORS is False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b"H\003" + # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) + # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None + # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" + # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None + # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" + _globals["_TRAINERSPEC"]._serialized_start = 45 + _globals["_TRAINERSPEC"]._serialized_end = 1581 + _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517 + _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570 + _globals["_NORMALIZERSPEC"]._serialized_start = 1584 + _globals["_NORMALIZERSPEC"]._serialized_end = 1793 + _globals["_SELFTESTDATA"]._serialized_start = 1795 + _globals["_SELFTESTDATA"]._serialized_end = 1916 + _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864 + _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905 + _globals["_MODELPROTO"]._serialized_start = 1919 + _globals["_MODELPROTO"]._serialized_end = 2429 + _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208 + _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418 + _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323 + _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407 +# 
@@protoc_insertion_point(module_scope) diff --git a/modified/utils/versions.py b/modified/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..945a3977ce62a9a55307862193e4be6f12c3c17f --- /dev/null +++ b/modified/utils/versions.py @@ -0,0 +1,117 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utilities for working with package versions +""" + +import importlib.metadata +import operator +import re +import sys +from typing import Optional + +from packaging import version + + +ops = { + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): + if got_ver is None or want_ver is None: + raise ValueError( + f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" + f" reinstalling {pkg}." + ) + if not ops[op](version.parse(got_ver), version.parse(want_ver)): + raise ImportError( + f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" + ) + + +def require_version(requirement: str, hint: Optional[str] = None) -> None: + """ + Perform a runtime check of the dependency versions, using the exact same syntax used by pip. + + The installed module version comes from the *site-packages* dir via *importlib.metadata*. 
+
+    Args:
+        requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
+        hint (`str`, *optional*): what suggestion to print in case of requirements not being met
+
+    Example:
+
+    ```python
+    require_version("pandas>1.1.2")
+    require_version("numpy>1.18.5", "this is important to have for whatever reason")
+    ```"""
+
+    hint = f"\n{hint}" if hint is not None else ""
+
+    # non-versioned check
+    if re.match(r"^[\w_\-\d]+$", requirement):
+        pkg, op, want_ver = requirement, None, None
+    else:
+        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
+        if not match:
+            raise ValueError(
+                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
+                f" got {requirement}"
+            )
+        pkg, want_full = match[0]
+        want_range = want_full.split(",")  # there could be multiple requirements
+        wanted = {}
+        for w in want_range:
+            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
+            if not match:
+                raise ValueError(
+                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
+                    f" but got {requirement}"
+                )
+            op, want_ver = match[0]
+            wanted[op] = want_ver
+            if op not in ops:
+                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
+
+    # special case
+    if pkg == "python":
+        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
+        for op, want_ver in wanted.items():
+            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
+        return
+
+    # check if any version is installed
+    try:
+        got_ver = importlib.metadata.version(pkg)
+    except importlib.metadata.PackageNotFoundError:
+        raise importlib.metadata.PackageNotFoundError(
+            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
+        )
+
+    # check that the right version is installed if a version number or a range was provided
+    if want_ver is not None:
+        for op, want_ver in wanted.items():
+            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
+
+
+def require_version_core(requirement):
+    """require_version wrapper which emits a core-specific hint on failure"""
+    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
+    return require_version(requirement, hint)
diff --git a/phi-2/LICENSE b/phi-2/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..62c0b5ff3a872f795526e30f09d9cc6ffd7b3b05
--- /dev/null
+++ b/phi-2/LICENSE
@@ -0,0 +1,71 @@
+MICROSOFT RESEARCH LICENSE TERMS
+
+IF YOU LIVE IN THE UNITED STATES, PLEASE READ THE “BINDING ARBITRATION AND CLASS ACTION WAIVER” SECTION BELOW. IT AFFECTS HOW DISPUTES ARE RESOLVED.
+
+These license terms are an agreement between you and Microsoft Corporation (or one of its affiliates). They apply to the source code, object code, machine learning models, or data (collectively “Materials”) that accompany this license. IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. BY USING THE MATERIALS, YOU ACCEPT THESE TERMS.
+
+1) INSTALLATION AND USE RIGHTS TO THE MATERIALS.
+
+Subject to the terms of this agreement, you have the below rights, if applicable, to use the Materials solely for non-commercial, non-revenue generating, research purposes:
+
+a) Source Code. If source code is included, you may use and modify the source code, but you may not distribute the source code.
+
+b) Object Code. If object code is included, you may use the object code, but you may not distribute the object code.
+
+c) Models.
If machine learning model(s) are included, you may use the model(s), but you may not distribute the models. + +d) Data. If data is included, you may use and modify the data, but your use and modification must be consistent with the consent under which the data was provided and/or gathered and you may not distribute the data or your modifications to the data. + +2) SCOPE OF LICENSE. The Materials are licensed, not sold. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you will not (and have no right to): + +a) work around any technical limitations in the Materials that only allow you to use it in certain ways; + +b) reverse engineer, decompile or disassemble the Materials; + +c) remove, minimize, block, or modify any notices of Microsoft or its suppliers in the Materials; + +d) use the Materials in any way that is against the law or to create or propagate malware; or + +e) share, publish, distribute or lend the Materials, provide the Materials as a stand-alone hosted solution for others to use, or transfer the Materials or this agreement to any third party. + +3) PERSONAL DATA. If the data (set forth in Section 1(c) above) includes or is found to include any data that enables any ability to identify an individual (“Personal Data”), you will not use such Personal Data for any purpose other than was authorized and consented to by the data subject/research participant. You will not use Personal Data to contact any person. You will keep Personal Data in strict confidence. You will not share any Personal Data that is collected or in your possession with any third party for any reason and as required under the original consent agreement. Further, you will destroy the Personal Data and any backup or copies, immediately upon the completion of your research. + +4) LICENSE TO MICROSOFT. Notwithstanding the limitations in Section 1, you may distribute your modifications back to Microsoft, and if you do provide Microsoft with modifications of the Materials, you hereby grant Microsoft, without any restrictions or limitations, a non-exclusive, perpetual, irrevocable, royalty-free, assignable and sub-licensable license, to reproduce, publicly perform or display, install, use, modify, post, distribute, make and have made, sell and transfer such modifications and derivatives for any purpose. + +5) PUBLICATION. You may publish (or present papers or articles) on your results from using the Materials provided that no material or substantial portion of the Materials is included in any such publication or presentation. + +6) FEEDBACK. Any feedback about the Materials provided by you to us is voluntarily given, and Microsoft shall be free to use the feedback as it sees fit without obligation or restriction of any kind, even if the + +feedback is designated by you as confidential. Such feedback shall be considered a contribution and licensed to Microsoft under the terms of Section 4 above. + +7) EXPORT RESTRICTIONS. You must comply with all domestic and international export laws and regulations that apply to the Materials, which include restrictions on destinations, end users, and end use. For further information on export restrictions, visit (aka.ms/exporting). + +8) SUPPORT SERVICES. Microsoft is not obligated under this agreement to provide any support services for the Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind. + +9) BINDING ARBITRATION AND CLASS ACTION WAIVER. 
This Section applies if you live in (or, if a business, your principal place of business is in) the United States. If you and Microsoft have a dispute, you and Microsoft agree to try for 60 days to resolve it informally. If you and Microsoft can’t, you and Microsoft agree to binding individual arbitration before the American Arbitration Association under the Federal Arbitration Act (“FAA”), and not to sue in court in front of a judge or jury. Instead, a neutral arbitrator will decide. Class action lawsuits, class-wide arbitrations, private attorney-general actions, and any other proceeding where someone acts in a representative capacity are not allowed; nor is combining individual proceedings without the consent of all parties. The complete Arbitration Agreement contains more terms and is at aka.ms/arb-agreement-1. You and Microsoft agree to these terms. + +10) ENTIRE AGREEMENT. This agreement, and any other terms Microsoft may provide for supplements, updates, or third-party applications, is the entire agreement for the Materials. + +11) APPLICABLE LAW AND PLACE TO RESOLVE DISPUTES. If you acquired the Materials in the United States or Canada, the laws of the state or province where you live (or, if a business, where your principal place of business is located) govern the interpretation of this agreement, claims for its breach, and all other claims (including consumer protection, unfair competition, and tort claims), regardless of conflict of laws principles, except that the FAA governs everything related to arbitration. If you acquired the Materials in any other country, its laws apply, except that the FAA governs everything related to arbitration. If U.S. federal jurisdiction exists, you and Microsoft consent to exclusive jurisdiction and venue in the federal court in King County, Washington for all disputes heard in court (excluding arbitration). If not, you and Microsoft consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington for all disputes heard in court (excluding arbitration). + +12) CONSUMER RIGHTS; REGIONAL VARIATIONS. This agreement describes certain legal rights. You may have other rights, including consumer rights, under the laws of your state, province, or country. Separate and apart from your relationship with Microsoft, you may also have rights with respect to the party from which you acquired the Materials. This agreement does not change those other rights if the laws of your state, province, or country do not permit it to do so. For example, if you acquired the Materials in one of the below regions, or mandatory country law applies, then the following provisions apply to you: + +a) Australia. You have statutory guarantees under the Australian Consumer Law and nothing in this agreement is intended to affect those rights. + +b) Canada. If you acquired this software in Canada, you may stop receiving updates by turning off the automatic update feature, disconnecting your device from the Internet (if and when you re-connect to the Internet, however, the Materials will resume checking for and installing updates), or uninstalling the Materials. The product documentation, if any, may also specify how to turn off updates for your specific device or software. + +c) Germany and Austria. + +i. Warranty. The properly licensed software will perform substantially as described in any Microsoft materials that accompany the Materials. However, Microsoft gives no contractual guarantee in relation to the licensed software. + +ii. 
Limitation of Liability. In case of intentional conduct, gross negligence, claims based on the Product Liability Act, as well as, in case of death or personal or physical injury, Microsoft is liable according to the statutory law.
+
+Subject to the foregoing clause (ii), Microsoft will only be liable for slight negligence if Microsoft is in breach of such material contractual obligations, the fulfillment of which facilitate the due performance of this agreement, the breach of which would endanger the purpose of this agreement and the compliance with which a party may constantly trust in (so-called "cardinal obligations"). In other cases of slight negligence, Microsoft will not be liable for slight negligence.
+
+13) DISCLAIMER OF WARRANTY. THE MATERIALS ARE LICENSED “AS IS.” YOU BEAR THE RISK OF USING THEM. MICROSOFT GIVES NO EXPRESS WARRANTIES, GUARANTEES, OR CONDITIONS. TO THE EXTENT PERMITTED UNDER APPLICABLE LAWS, MICROSOFT EXCLUDES ALL IMPLIED WARRANTIES, INCLUDING MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.
+
+14) LIMITATION ON AND EXCLUSION OF DAMAGES. IF YOU HAVE ANY BASIS FOR RECOVERING DAMAGES DESPITE THE PRECEDING DISCLAIMER OF WARRANTY, YOU CAN RECOVER FROM MICROSOFT AND ITS SUPPLIERS ONLY DIRECT DAMAGES UP TO U.S. $5.00. YOU CANNOT RECOVER ANY OTHER DAMAGES, INCLUDING CONSEQUENTIAL, LOST PROFITS, SPECIAL, INDIRECT OR INCIDENTAL DAMAGES.
+
+This limitation applies to (a) anything related to the Materials, services, content (including code) on third party Internet sites, or third party applications; and (b) claims for breach of contract, warranty, guarantee, or condition; strict liability, negligence, or other tort; or any other claim; in each case to the extent permitted by applicable law.
+
+It also applies even if Microsoft knew or should have known about the possibility of the damages. The above limitation or exclusion may not apply to you because your state, province, or country may not allow the exclusion or limitation of incidental, consequential, or other damages.
\ No newline at end of file
diff --git a/phi-2/README.md b/phi-2/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bb7d1d71e64dbe6710a9602b76e5eb0ce8b1619a
--- /dev/null
+++ b/phi-2/README.md
@@ -0,0 +1,47 @@
+---
+license: other
+license_name: microsoft-research-license
+license_link: LICENSE
+---
+
+**DISCLAIMER**: I don't own the weights to this model; they are the property of Microsoft and were taken from the official repository: [microsoft/phi-2](https://huggingface.co/microsoft/phi-2).
+The sole purpose of this repository is to make the model loadable and usable through the HuggingFace `transformers` library.
+
+
+# Usage
+
+First, make sure you have the latest version of `transformers` installed; the upgrade command follows the optional runtime check sketched below.
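+
+To assert this requirement at runtime, one option is to reuse the `require_version` helper vendored in `modified/utils/versions.py` above. A minimal sketch, assuming that module is importable from your checkout (the `>=4.35.2` bound is illustrative, taken from `transformers_version` in this repo's `config.json`):
+
+```python
+# Sketch: fail fast if the installed transformers predates the version
+# recorded in config.json (4.35.2). Assumes modified/utils/versions.py
+# is on the import path; adjust the import to match your layout.
+from modified.utils.versions import require_version
+
+require_version("transformers>=4.35.2", "phi-2 weights were exported with transformers 4.35.2")
+```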
+ +``` +pip install -U transformers +``` + +Then use the transformers library to load the model from the library itself + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("susnato/phi-2") +tokenizer = AutoTokenizer.from_pretrained("susnato/phi-2") + +inputs = tokenizer('''def print_prime(n): + """ + Print all primes between 1 and n + """''', return_tensors="pt", return_attention_mask=False) + +outputs = model.generate(**inputs, max_length=200) +text = tokenizer.batch_decode(outputs)[0] +print(text) + +``` diff --git a/phi-2/added_tokens.json b/phi-2/added_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7debb4784a7d53328d4d021fc46314bec4af3833 --- /dev/null +++ b/phi-2/added_tokens.json @@ -0,0 +1,40 @@ +{ + "\t\t": 50294, + "\t\t\t": 50293, + "\t\t\t\t": 50292, + "\t\t\t\t\t": 50291, + "\t\t\t\t\t\t": 50290, + "\t\t\t\t\t\t\t": 50289, + "\t\t\t\t\t\t\t\t": 50288, + "\t\t\t\t\t\t\t\t\t": 50287, + " ": 50286, + " ": 50285, + " ": 50284, + " ": 50283, + " ": 50282, + " ": 50281, + " ": 50280, + " ": 50279, + " ": 50278, + " ": 50277, + " ": 50276, + " ": 50275, + " ": 50274, + " ": 50273, + " ": 50272, + " ": 50271, + " ": 50270, + " ": 50269, + " ": 50268, + " ": 50267, + " ": 50266, + " ": 50265, + " ": 50264, + " ": 50263, + " ": 50262, + " ": 50261, + " ": 50260, + " ": 50259, + " ": 50258, + " ": 50257 +} diff --git a/phi-2/config.json b/phi-2/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a8b979a430e1f83ea812ac1828ec711ce507fcec --- /dev/null +++ b/phi-2/config.json @@ -0,0 +1,28 @@ +{ + "architectures": [ + "PhiForCausalLM" + ], + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "gelu_new", + "hidden_size": 2560, + "initializer_range": 0.02, + "intermediate_size": 10240, + "max_position_embeddings": 2048, + "model_type": "phi", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "pretraining_tp": 1, + "resid_pdrop": 0.1, + "embd_pdrop": 0.0, + "layer_norm_eps": 1e-05, + "rope_scaling": null, + "rope_theta": 10000.0, + "partial_rotary_factor": 0.4, + "qk_layernorm": false, + "tie_word_embeddings": false, + "torch_dtype": "float16", + "transformers_version": "4.35.2", + "use_cache": true, + "vocab_size": 51200 +} \ No newline at end of file diff --git a/phi-2/generation_config.json b/phi-2/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..ab7f9a7033c095caf3dda0219caef2b2e690b5b6 --- /dev/null +++ b/phi-2/generation_config.json @@ -0,0 +1,4 @@ +{ + "_from_model_config": true, + "transformers_version": "4.35.2" +} diff --git a/phi-2/merges.txt b/phi-2/merges.txt new file mode 100644 index 0000000000000000000000000000000000000000..226b0752cac7789c48f0cb3ec53eda48b7be36cc --- /dev/null +++ b/phi-2/merges.txt @@ -0,0 +1,50001 @@ +#version: 0.2 +Ġ t +Ġ a +h e +i n +r e +o n +Ġt he +e r +Ġ s +a t +Ġ w +Ġ o +e n +Ġ c +i t +i s +a n +o r +e s +Ġ b +e d +Ġ f +in g +Ġ p +o u +Ġa n +a l +a r +Ġt o +Ġ m +Ġo f +Ġ in +Ġ d +Ġ h +Ġan d +i c +a s +l e +Ġt h +i on +o m +l l +en t +Ġ n +Ġ l +s t +Ġ re +v e +Ġ e +r o +l y +Ġb e +Ġ g +Ġ T +c t +Ġ S +i d +o t +Ġ I +u t +e t +Ġ A +Ġ is +Ġ on +i m +a m +o w +a y +a d +s e +Ġth at +Ġ C +i g +Ġf or +a c +Ġ y +v er +u r +Ġ u +l d +Ġs t +Ġ M +' s +Ġ he +Ġ it +at ion +it h +i r +c e +Ġy ou +i l +Ġ B +Ġw h +o l +Ġ P +Ġw ith +Ġ 1 +t er +c h +Ġa s +Ġw e +Ġ ( +n d +i ll +Ġ D +i f +Ġ 2 +a g +er s +k e +Ġ " +Ġ H +e m +Ġc on +Ġ W +Ġ R +he r +Ġw as +Ġ r +o d +Ġ F +u l +at e +Ġa t +r i 
+p p +o re +ĠT he +Ġs e +u s +Ġp ro +Ġh a +u m +Ġa re +Ġd e +a in +an d +Ġo r +ig h +es t +is t +a b +r om +Ġ N +t h +Ġc om +Ġ G +u n +o p +0 0 +Ġ L +Ġn ot +es s +Ġe x +Ġ v +re s +Ġ E +e w +it y +an t +Ġb y +e l +o s +or t +o c +q u +Ġf rom +Ġha ve +Ġs u +i ve +ou ld +Ġs h +Ġth is +n t +r a +p e +igh t +ar t +m ent +Ġa l +u st +en d +- - +al l +Ġ O +ac k +Ġc h +Ġ le +i es +re d +ar d +â Ģ +ou t +Ġ J +Ġa b +e ar +i v +al ly +ou r +o st +g h +p t +Ġp l +as t +Ġc an +a k +om e +u d +T he +Ġh is +Ġd o +Ġg o +Ġh as +g e +' t +Ġ U +r ou +Ġs a +Ġ j +Ġb ut +Ġw or +Ġa ll +e ct +Ġ k +am e +Ġw ill +o k +Ġw he +Ġthe y +id e +0 1 +f f +ic h +p l +t her +Ġt r +. . +Ġin t +i e +u re +ag e +Ġn e +i al +a p +in e +ic e +Ġm e +Ġo ut +an s +on e +on g +ion s +Ġwh o +Ġ K +Ġu p +Ġthe ir +Ġa d +Ġ 3 +Ġu s +at ed +ou s +Ġm ore +u e +o g +ĠS t +in d +i ke +Ġs o +im e +p er +. " +b er +i z +a ct +Ġon e +Ġsa id +Ġ - +a re +Ġyou r +c c +ĠT h +Ġc l +e p +a ke +ab le +i p +Ġcon t +Ġwh ich +i a +Ġ im +Ġab out +Ġwe re +ver y +u b +Ġh ad +Ġ en +Ġcom p +, " +ĠI n +Ġu n +Ġa g +i re +ac e +a u +ar y +Ġw ould +as s +r y +Ġ âĢ +c l +o ok +e re +s o +Ġ V +ig n +i b +Ġof f +Ġt e +v en +Ġ Y +i le +o se +it e +or m +Ġ2 01 +Ġre s +Ġm an +Ġp er +Ġo ther +or d +ul t +Ġbe en +Ġl ike +as e +an ce +k s +ay s +ow n +en ce +Ġd is +ct ion +Ġan y +Ġa pp +Ġs p +in t +res s +ation s +a il +Ġ 4 +ic al +Ġthe m +Ġhe r +ou nt +ĠC h +Ġa r +Ġ if +Ġthe re +Ġp e +Ġy ear +a v +Ġm y +Ġs ome +Ġwhe n +ou gh +ac h +Ġth an +r u +on d +ic k +Ġo ver +ve l +Ġ qu +Ċ Ċ +Ġs c +re at +re e +ĠI t +ou nd +p ort +Ġal so +Ġp art +f ter +Ġk n +Ġbe c +Ġt ime +en s +Ġ 5 +op le +Ġwh at +Ġn o +d u +m er +an g +Ġn ew +-- -- +Ġg et +or y +it ion +ing s +Ġj ust +Ġint o +Ġ 0 +ent s +o ve +t e +Ġpe ople +Ġp re +Ġit s +Ġre c +Ġt w +i an +ir st +ar k +or s +Ġwor k +ad e +o b +Ġs he +Ġo ur +w n +in k +l ic +Ġ1 9 +ĠH e +is h +nd er +au se +Ġh im +on s +Ġ [ +Ġ ro +f orm +i ld +at es +ver s +Ġon ly +o ll +Ġs pe +c k +e ll +am p +Ġa cc +Ġb l +i ous +ur n +f t +o od +Ġh ow +he d +Ġ ' +Ġa fter +a w +Ġat t +o v +n e +Ġpl ay +er v +ic t +Ġc ould +it t +Ġa m +Ġf irst +Ġ 6 +Ġa ct +Ġ $ +e c +h ing +u al +u ll +Ġcom m +o y +o ld +c es +at er +Ġf e +Ġbe t +w e +if f +Ġtw o +oc k +Ġb ack +) . +id ent +Ġu nder +rou gh +se l +x t +Ġm ay +rou nd +Ġp o +p h +is s +Ġd es +Ġm ost +Ġd id +Ġad d +j ect +Ġin c +f ore +Ġp ol +on t +Ġag ain +cl ud +ter n +Ġkn ow +Ġne ed +Ġcon s +Ġc o +Ġ . 
+Ġw ant +Ġse e +Ġ 7 +n ing +i ew +ĠTh is +c ed +Ġe ven +Ġin d +t y +ĠW e +at h +Ġthe se +Ġp r +Ġu se +Ġbec ause +Ġf l +n g +Ġn ow +ĠâĢ ĵ +c om +is e +Ġm ake +Ġthe n +ow er +Ġe very +ĠU n +Ġse c +os s +u ch +Ġe m +Ġ = +ĠR e +i ed +r it +Ġin v +le ct +Ġsu pp +at ing +Ġl ook +m an +pe ct +Ġ 8 +ro w +Ġb u +Ġwhe re +if ic +Ġyear s +i ly +Ġd iff +Ġsh ould +Ġre m +T h +I n +Ġe v +d ay +' re +ri b +Ġre l +s s +Ġde f +Ġr ight +Ġs y +) , +l es +00 0 +he n +Ġth rough +ĠT r +_ _ +Ġw ay +Ġd on +Ġ , +Ġ1 0 +as ed +Ġas s +ub lic +Ġre g +ĠA nd +i x +Ġ very +Ġin clud +ot her +Ġim p +ot h +Ġsu b +ĠâĢ Ķ +Ġbe ing +ar g +ĠW h += = +ib le +Ġdo es +an ge +r am +Ġ 9 +er t +p s +it ed +ation al +Ġb r +Ġd own +Ġman y +ak ing +Ġc all +ur ing +it ies +Ġp h +ic s +al s +Ġde c +at ive +en er +Ġbe fore +il ity +Ġwe ll +Ġm uch +ers on +Ġth ose +Ġsu ch +Ġ ke +Ġ end +ĠB ut +as on +t ing +Ġl ong +e f +Ġth ink +y s +Ġbe l +Ġs m +it s +a x +Ġo wn +Ġpro v +Ġs et +if e +ment s +b le +w ard +Ġsh ow +Ġp res +m s +om et +Ġo b +Ġs ay +ĠS h +t s +f ul +Ġe ff +Ġg u +Ġin st +u nd +re n +c ess +Ġ ent +ĠY ou +Ġgo od +Ġst art +in ce +Ġm ade +t t +st em +ol og +u p +Ġ | +um p +Ġhe l +ver n +ul ar +u ally +Ġa c +Ġm on +Ġl ast +Ġ2 00 +1 0 +Ġst ud +u res +ĠA r +sel f +ar s +mer ic +u es +c y +Ġm in +oll ow +Ġc ol +i o +Ġm od +Ġc ount +ĠC om +he s +Ġf in +a ir +i er +âĢ Ķ +re ad +an k +at ch +e ver +Ġst r +Ġpo int +or k +ĠN ew +Ġs ur +o ol +al k +em ent +Ġus ed +ra ct +we en +Ġs ame +ou n +ĠA l +c i +Ġdiff ere +Ġwh ile +---- ---- +Ġg ame +ce pt +Ġs im +.. . +Ġin ter +e k +Ġre port +Ġpro du +Ġst ill +l ed +a h +Ġhe re +Ġwor ld +Ġth ough +Ġn um +ar ch +im es +al e +ĠS e +ĠI f +/ / +ĠL e +Ġre t +Ġre f +Ġtr ans +n er +ut ion +ter s +Ġt ake +ĠC l +Ġcon f +w ay +a ve +Ġgo ing +Ġs l +u g +ĠA meric +Ġspe c +Ġh and +Ġbet ween +ist s +ĠD e +o ot +I t +Ġe ar +Ġagain st +Ġh igh +g an +a z +at her +Ġex p +Ġo p +Ġin s +Ġg r +Ġhel p +Ġre qu +et s +in s +ĠP ro +is m +Ġf ound +l and +at a +us s +am es +Ġp erson +Ġg reat +p r +Ġs ign +ĠA n +' ve +Ġs omet +Ġs er +h ip +Ġr un +Ġ : +Ġt er +ire ct +Ġf ollow +Ġd et +ic es +Ġf ind +1 2 +Ġm em +Ġc r +e red +e x +Ġex t +ut h +en se +c o +Ġte am +v ing +ou se +as h +at t +v ed +Ġsy stem +ĠA s +d er +iv es +m in +Ġle ad +ĠB l +c ent +Ġa round +Ġgo vern +Ġc ur +vel op +an y +Ġc our +al th +ag es +iz e +Ġc ar +od e +Ġl aw +Ġre ad +' m +c on +Ġre al +Ġsupp ort +Ġ1 2 +.. .. 
+Ġre ally +n ess +Ġf act +Ġd ay +Ġb oth +y ing +Ġs erv +ĠF or +Ġth ree +Ġw om +Ġm ed +od y +ĠThe y +5 0 +Ġex per +t on +Ġe ach +ak es +Ġc he +Ġc re +in es +Ġre p +1 9 +g g +ill ion +Ġg rou +ut e +i k +W e +g et +E R +Ġm et +Ġs ays +o x +Ġd uring +er n +iz ed +a red +Ġf am +ic ally +Ġha pp +ĠI s +Ġch ar +m ed +v ent +Ġg ener +i ent +p le +i et +re nt +1 1 +v es +pt ion +Ġ2 0 +form ation +Ġc or +Ġoff ic +ie ld +Ġto o +is ion +Ġin f +Ġ Z +t he +o ad +Ġp ublic +Ġpro g +r ic +* * +Ġw ar +Ġp ower +v iew +Ġf ew +Ġl oc +Ġdiffere nt +Ġst ate +Ġhe ad +' ll +Ġp oss +Ġst at +re t +ant s +Ġv al +Ġis s +Ġc le +i vers +an c +Ġex pl +Ġan other +Ġ Q +Ġa v +th ing +n ce +W h +Ġch ild +Ġs ince +i red +l ess +Ġl ife +Ġde velop +itt le +Ġde p +Ġp ass +ã ĥ +Ġt urn +or n +Th is +b ers +ro ss +ĠA d +Ġf r +Ġres p +Ġsec ond +o h +Ġ / +Ġdis c +Ġ & +Ġsomet hing +Ġcomp le +Ġ ed +Ġf il +Ġmon th +a j +u c +Ġgovern ment +Ġwith out +Ġle g +Ġd ist +Ġp ut +Ġqu est +an n +Ġpro t +2 0 +Ġne ver +i ence +Ġle vel +Ġar t +Ġth ings +Ġm ight +Ġeff ect +Ġcont ro +Ġc ent +Ġ1 8 +Ġall ow +Ġbel ie +ch ool +ot t +Ġinc re +Ġfe el +Ġres ult +Ġl ot +Ġf un +ot e +Ġt y +ere st +Ġcont in +Ġus ing +Ġb ig +2 01 +Ġas k +Ġb est +Ġ ) +I N +Ġo pp +3 0 +Ġnum ber +in ess +S t +le ase +Ġc a +Ġm ust +Ġd irect +Ġg l +Ġ < +Ġop en +Ġp ost +Ġcom e +Ġse em +ord ing +Ġwe ek +ate ly +it al +Ġe l +ri end +Ġf ar +Ġt ra +in al +Ġp ri +ĠU S +Ġpl ace +Ġfor m +Ġto ld +" : +ain s +at ure +ĠTr ump +Ġst and +Ġ # +id er +ĠF r +Ġne xt +Ġs oc +Ġp ur +Ġle t +Ġl ittle +Ġh um +Ġ i +r on +1 5 +Ġ1 5 +Ġcomm un +Ġm ark +ĠThe re +Ġw r +ĠTh at +Ġin formation +w ays +Ġb us +a pp +Ġinv est +m e +Ġh ard +ain ed +e ad +Ġim port +Ġapp ro +Ġt est +Ġt ri +Ġre st +os ed +Ġf ull +Ġc are +ĠS p +Ġc ase +O N +Ġs k +Ġl ess +Ġ + +Ġpart ic +ĠP l +ab ly +u ck +is hed +ch n +b e +Ġl ist +at or +Ġto p +Ġad v +ĠB e +ru ct +Ġd em +r ation +l ing +g y +re en +g er +Ġh ome +Ġle ft +Ġbet ter +Ġd ata +Ġ1 1 +Ġatt ack +Ġpro ble +l ine +ard s +Ġbe h +r al +ĠH ow +ĠS he +ar ge +Ġ -- +: // +Ġb ro +ĠP h +at s +Ġbu ild +w w +id ed +a im +as es +en cy +Ġm ain +in ed +Ġinclud ing +Ġ { +Ġg ot +Ġint erest +Ġke ep +Ġ X +Ġe as +ain ing +Ġcl ass +âĢ ¦ +ĠN o +Ġv ar +Ġsm all +amp le +A T +Ġ ide +ĠS o +Ġre ce +Ġpol it +Ġm ov +Ġpl an +Ġper cent +iv ing +Ġc amp +Ġp ay +1 4 +s c +is ed +Ġu nt +one y +pl oy +== == +Ġdid n +ĠI nd +el s +ert ain +Ġp os +__ __ +i ver +Ġpro cess +Ġprog ram +if ied +ĠR ep +1 6 +u ro +olog y +at ter +in a +Ġn ame +ĠA ll +Ġf our +Ġret urn +v ious +b s +Ġcall ed +Ġm ove +ĠS c +ir d +Ġgrou p +Ġb re +Ġm en +Ġc ap +t en +e e +Ġd ri +le g +he re +uth or +Ġp at +Ġcur rent +id es +Ġp op +t o +ent ion +Ġal ways +Ġm il +Ġwom en +Ġ1 6 +Ġo ld +iv en +ra ph +ĠO r +r or +ent ly +Ġn ear +ĠE x +re am +s h +Ġ1 4 +Ġf ree +iss ion +st and +ĠC on +al ity +us ed +1 3 +Ġdes ign +Ġch ange +Ġch ang +Ġb o +Ġv is +em ber +Ġb ook +read y +Ġk ill +2 5 +pp ed +Ġa way +Ġab le +Ġcount ry +Ġcon st +ar n +Ġor der +A R +i or +i um +or th +1 8 +ail able +Ġs w +Ġm illion +Ġ1 3 +at ic +t ed +ĠG o +Ġo per +en g +Ġth ing +aj or +con om +ĠCom m +Ġwh y +u red +ur al +Ġs chool +b y +ĠM ar +Ġa ff +Ġd ays +Ġan n +us h +an e +I f +e g +Ġpro f +Ġhe alth +ou th +B ut +ion al +. 
, +Ġs ol +Ġal ready +Ġ3 0 +Ġchar act +H e +Ġf riend +E S +i ans +ic le +' d +ĠO n +Ġle ast +Ġp rom +Ġd r +Ġh ist +it her +Ġ est +i qu +1 7 +s on +Ġte ll +Ġt alk +oh n +o int +le ction +A N +Ġunt il +au gh +Ġl ater +Ġ ve +Ġv iew +end ing +iv ed +Ġwor d +w are +Ġc ost +Ġen ough +Ġg ive +ĠUn ited +Ġte chn +are nt +O R +Ġp ar +ĠD r +Ġ201 6 +r ist +er ing +Ġ  +Ġl arge +s ide +ac y +cc ess +Ġw in +Ġimport ant +Ġ19 9 +Ġdoes n +Ġ1 7 +Ġbus iness +Ġcle ar +Ġre se +" , +ur y +Ġe qu +as ter +al f +ĠAmeric an +n ect +Ġex pect +ivers ity +Ġo cc +ĠF l +Ġk ind +Ġme an +Ġp ast +Ġde v +Ġb as +le t +ra ft +Ġor gan +Ġde l +Ġper form +Ġst ory +Ġse ason +ĠC ol +Ġcl aim +Ġc ame +Ġwith in +Ġl ine +Ġpro ject +ĠA t +Ġcontro l +end ed +ĠS y +Ġa ir +iz ation +Ġ * +le y +Ġm oney +id d +Y ou +f or +Ġfam ily +Ġm aking +Ġb it +Ġpol ice +Ġhapp en +Ġ vers +on y +u ff +ĠW hen +Ġs it +ide o +l f +is on +Ġsu re +g in +Ġapp ear +Ġl ight +Ġ es +o f +Ġw ater +Ġt imes +n ot +Ġg row +Ġcomp any +ĠT e +ow s +Ġm ar +our ce +i ol +ar m +b r +Ġex ample +Ġcon c +Ġf ore +ĠT o +p ro +E N +ri es +Ġ2 5 +ĠC an +ne y +Ġact ually +Ġe ver +ur ity +ak en +ap s +Ġt ax +Ġm ajor +am a +Ġof ten +er al +Ġhum an +Ġj ob +is ter +Ġav ailable +oc r +en n +a id +iv id +Ġrec ord +? " +Ġs ing +ĠA m +id ence +Ġnew s +st er +Ġe conom +Ġfollow ing +ĠB r +is ing +Ġh our +m ost +um ent +Ġse x +Ġdes c +Ġbec ome +ĠE d +Ġto ok +Ġha ving +Ġprodu ct +a ult +A s +ar ing +Ġme ans +Ġh op +un e +Ġch o +Ġc ertain +Ġn on +Ġde al +2 4 +le ment +oc i +en e +Ġs ide +ĠP r +ĠM ay +Ġre ason +u ed +c hed +ul ation +Ġe lect +Ġoffic ial +Ġposs ible +Ġh old +and s +ot s +Ġc ity +or ies +Ġse ver +Ġchild ren +Ġon ce +Ġact iv +l er +Ġn ight +it ions +ĠJ ohn +a pe +pl ay +Ġd one +Ġl im +Ġwork ing +ĠP res +or ld +e b +ĠC o +Ġb ody +ail s +ut es +ĠM r +Ġwhe ther +Ġa uthor +ro p +Ġpro per +Ġse en +) ; +Ġf ac +ĠS u +Ġcon d +it ing +Ġcour se +Ġ } +-------- -------- +a ign +Ġev ent +Ġen g +Ġp ot +Ġin tern +i am +Ġsh ort +em pt +ã Ĥ +ĠG od +il ar +8 0 +Ġor ig +I S +our n +ab ility +it ive +Ġd am +Ġ1 00 +Ġp ress +Ġdo ing +Ġprot ect +r ing +Ġthough t +Ġquest ion +re w +ĠW ar +Ġsever al +ĠSt ate +Ġg iven +Ġf und +ĠT w +Ġw ent +an ces +w ork +p or +m y +4 0 +Ġar g +art ment +ust om +Ġpol ic +Ġme et +Ġc reat +2 2 +ĠSt ates +Ġg ames +ra w +ut ure +Ġunder stand +ur s +ĠO b +l ish +s y +Ġm akes +Ġw on +ag on +Ġh tt +Ġl ove +ent ial +Ġcomple te +p ar +ĠI m +A L +Ġacc ount + ł +ore d +ver t +Ġ ident +Ġ201 5 +Ġother s +ĠM in +i ber +ver age +The re +ition al +d d +Ġpro b +Ġyou ng +Ġal ong +Ġacc ording +Ġy et +Ġmem bers +ĠWh at +o id +ĠM an +A nd +Ġam ong +a i +Ġem ploy +ĠR es +Ġ > +Ġinv ol +Ġl ow +a f +ĠC ar +Ġh ig +ĠO ne +ĠS ec +in ation +Ġlike ly +Ġan t +ag ed +ĠR uss +Ġb en +Ġre le +F or +b ack +ĠN ot +Ġpres ident +b all +Ġacc ess +ivid ual +ĠD em +ĠE uro +6 0 +Ġkn own +ir l +ĠG r +Ġear ly +u se +iet y +âĢ ĵ +Ġf ight +Ġs ent +Ġto day +Ġmark et +" . 
+Ġb ased +Ġstr ong +ur ther +Ġde b +m ber +Ġproble m +Ġde ath +Ġsoc ial +im ate +A S +ort un +Ġcamp aign +er y +C h +Ġe y +i ally +Ġm us +w h +p os +Ġ er +Ġsa f +Ġmonth s +ir on +Ġv iol +Ġf ive +Ġst re +Ġplay ers +in c +al d +y ear +a un +Ġsu ccess +Ġpres ent +ere nce +Ġ201 4 +Ġsu gg +Ġpartic ular +Ġtr y +Ġsugg est +ĠCh rist +on es +Ġpri v +2 3 +Ġc rit +Ġl and +Ġloc al +if y +2 9 +Ġa ut +E D +ĠG u +Ġm ult +Ġpolit ical +Ġask ed +Ġfor mer +it ter +ri pt +Ġcl ose +Ġp ract +ĠY ork +Ġget ting +Ġac ross +Ġcom b +Ġbelie ve +Ġ z +Ġto get +Ġtoget her +ĠC ent +ir c +Ġind ividual +ĠM c +2 7 +is k +ĠE ng +Ġf ace +Ġ2 4 +Ġval ue +Ġare a +e v +Ġw rit +ĠPres ident +Ġv ot +Ġke y +Ġm om +p ut +Ġany thing +Ġexper ience +att le +Ġm ind +a ff +om m +Ġf uture +g ed +Ġc ut +Ġto t +it ch +Ġv ideo +Ġinvest ig +Ġn et +ĠM y +r ict +i en +. ) +Ġimp ro +th ough +ward s +Ġcon nect +ĠM ed +sel ves +ens ive +m b +o ber +at ors +A n +Ġ5 0 +Ġre du +res ent +Ġab ove +Ġf re +ĠEuro pe +s w +Ġam ount +ĠA pp +Ġe ither +Ġmil it +Ġan al +Ġf ail +ĠE n +al es +Ġspec ial +Ġbl ack +I T +c her +Ġlook ing +Ġf ire +y n +Ġal most +o on +Ġstud y +Ġm iss +c hes +ro wn +Ġt re +Ġcommun ity +Ġmed ia +Ġf ood +Ġcom es +ĠUn iversity +Ġsing le +Wh at +u ly +Ġh alf +ag ue +h od +ĠRep ublic +Ġstart ed +Ġqu ick +ot o +b ook +Ġiss ue +it or +Ġel se +Ġcons ider +2 6 +ro du +Ġt aken +2 8 +9 9 +ĠW ith +Ġtr ue +Ġw a +Ġtr ad +Ġag o +Ġm ess +ie f +Ġadd ed +o ke +Ġb ad +Ġf av +3 3 +Ġsim ilar +as k +ĠD on +Ġcharact er +ort s +ĠH ouse +Ġreport ed +Ġty pe +v al +i od +ĠHow ever +Ġt arg +Ġent ire +pp ing +Ġhist ory +Ġl ive +ff ic +.... .... +ed eral +Ġtr ying +Ġdisc uss +ĠH ar +ac es +l ished +Ġse lf +os p +re st +Ġro om +el t +Ġf all +ol ution +Ġe t +Ġ x +Ġis n +Ġide a +b o +Ġs ound +ĠD ep +Ġsome one +ci ally +ull y +Ġf oc +Ġob ject +if t +ap er +Ġplay er +Ġr ather +Ġserv ice +as hing +ĠD o +ĠP art +ru g +m on +p ly +Ġm or +Ġnot hing +Ġprov ide +I C +un g +Ġpart y +Ġex ist +Ġm ag +7 0 +Ġr ul +Ġh ouse +Ġbeh ind +Ġhow ever +ĠW orld +Ġs um +Ġapp lic +Ġ ; +Ġfun ction +g r +ĠP ol +Ġfr ont +2 00 +Ġser ies +Ġt em +Ġty p +ill s +Ġo pt +Ġpoint s +Ġbel ow +itt ed +Ġspec ific +Ġ201 7 +um b +Ġr a +Ġpre vious +Ġpre t +re me +Ġc ustom +Ġcour t +ĠM e +Ġre pl +Ġwho le +g o +c er +Ġt reat +ĠA ct +Ġprob ably +Ġle arn +end er +ĠA ss +Ġvers ion +n ow +Ġche ck +ĠC al +R E +min ist +O n +our ces +Ġben ef +Ġd oc +Ġdet er +Ġen c +Ġsu per +Ġadd ress +Ġv ict +Ġ201 3 +Ġme as +t r +Ġf ield +W hen +Ġsign ific +u ge +Ġfe at +Ġcomm on +l oad +Ġbe gin +Ġbr ing +Ġa ction +er man +Ġdesc rib +Ġind ust +Ġwant ed +ri ed +m ing +Ġatt empt +4 5 +f er +Ġd ue +ress ion +# # +Ġsh all +Ġs ix +o o +Ġst ep +Ġp ub +Ġhim self +Ġ2 3 +Ġc op +Ġd est +Ġst op +A C +ib ility +Ġl ab +ic ult +Ġhour s +Ġcre ate +Ġf urther +ĠAmeric a +ĠC ity +Ġd ou +he ad +S T +ĠN orth +c ing +Ġn ational +u le +ĠIn st +Ġt aking +ĠQ u +ir t +Ġre d +Ġrese arch +v iron +ĠG e +Ġbre ak +an a +Ġsp ace +ater ial +Ġrec ent +ĠA b +Ġgener al +Ġh it +Ġper iod +Ġevery thing +ive ly +Ġph ys +Ġsay ing +an ks +Ġc ou +Ġc ult +ac ed +e al +u ation +Ġc oun +l u +Ġinclud e +Ġpos ition +ĠA fter +ĠCan ad +ĠE m +Ġim m +ĠR ed +Ġp ick +Ġcom pl +Ġm atter +re g +e xt +ang u +is c +o le +a ut +Ġcomp et +e ed +f ect +Ġ2 1 +ĠS en +ĠThe se +as ing +Ġcan not +Ġin it +Ġrel ations +ac hed +Ġb ar +Ġ4 0 +ĠT H +Ġ201 2 +Ġv ol +Ġg round +Ġsec urity +Ġup d +il t +3 5 +Ġconc ern +ĠJ ust +Ġwh ite +Ġseem s +ĠH er +pe cially +i ents +Ġann oun +Ġf ig +ight s +Ġst ri +l ike +id s +Ġs us +Ġw atch +Ġ â +Ġw ind +ĠC ont +Ġit self +Ġm ass +A l +y le +iqu e +ĠN ational +Ġab s +Ġp 
ack +Ġout side +Ġan im +Ġp ain +et er +Ġman ag +du ct +og n +Ġ ] +ĠSe pt +se c +o ff +ĠJ an +Ġf oot +ad es +Ġth ird +Ġm ot +Ġev idence +int on +Ġth reat +a pt +pl es +c le +Ġl o +Ġde cl +Ġit em +med i +Ġrep resent +om b +am er +Ġsignific ant +og raph +s u +Ġc al +i res +00 00 +I D +A M +Ġsim ply +Ġlong er +Ġf ile +O T +c he +S o +ate g +or g +ĠH is +Ġen er +Ġd om +Ġup on +il i +": " +Ġthem selves +Ġcom ing +Ġqu ite +Ġdiff icult +ĠB ar +il ities +re l +end s +c ial +6 4 +Ġwom an +ra p +y r +Ġne cess +ip s +Ġte xt +Ġrequ ire +Ġmilit ary +Ġre view +Ġresp ons +7 5 +Ġsub ject +Ġinst ead +Ġiss ues +Ġg en +" ," +Ġmin utes +Ġwe ap +r ay +am ed +t ime +b l +H ow +Ġc ode +ĠS m +Ġhig her +ĠSt e +r is +Ġp age +Ġstud ents +ĠIn tern +Ġmet hod +ĠA ug +ĠP er +ĠA g +Ġpolic y +ĠS w +Ġex ec +Ġac cept +um e +rib ut +Ġword s +Ġfin al +Ġchang es +ĠDem ocr +Ġfriend s +Ġres pect +Ġe p +Ġcomp an +iv il +Ġdam age +** ** +og le +viron ment +Ġne g +ent al +Ġa p +Ġtot al +iv al +! " +l im +Ġneed s +Ġag re +Ġdevelop ment +Ġa ge +ip le +2 1 +Ġresult s +ĠA f +S h +Ġg un +ĠOb ama +ro ll +Ġ @ +Ġright s +ĠB rit +Ġrun ning +Ġwas n +Ġp ort +Ġr ate +Ġpret ty +Ġtarg et +Ġsa w +Ġc irc +Ġwor ks +ic ro +al t +o ver +ww w +Th at +l ier +Ġevery one +ud e +Ġp ie +idd le +ra el +Ġr ad +Ġbl ock +Ġw alk +T o +ã ģ +n es +ĠA ust +a ul +ro te +ĠS outh +ess ion +op h +Ġshow s +Ġs ite +Ġj o +Ġr isk +cl us +l t +Ġin j +id ing +ĠS pe +Ġch all +ir m +Ġ2 2 +itt ing +st r +Ġh y +L E +ke y +Ġbe gan +at ur +ashing ton +l am +ĠD av +b it +Ġs ize +ĠP ar +3 8 +ourn al +f ace +Ġdec ision +Ġl arg +Ġj ud +re ct +Ġcontin ue +ĠO ct +ove red +ĠI nt +==== ==== +Ġp arent +ĠW ill +Ġeas y +Ġd rug +ang er +Ġs ense +Ġd i +id ay +Ġener gy +ist ic +Ġass oci +ar ter +ob al +e ks +ĠE l +ur ch +Ġg irl +o e +it le +Ġ2 8 +ĠC he +Ġrequ est +Ġso on +Ġh ost +k y +Ġst ates +om es +Ġm aterial +le x +Ġmom ent +Ġan sw +on se +Ġes pecially +Ġn orm +Ġserv ices +p ite +r an +Ġro le +4 4 +) : +Ġc red +C l +____ ____ +Ġm at +Ġl og +ĠCl inton +O U +Ġoff ice +Ġ2 6 +Ġch arg +Ġtr ack +m a +Ġhe art +Ġb all +Ġperson al +Ġbuild ing +n a +s et +b ody +ĠBl ack +Ġincre ase +itt en +Ġneed ed +3 6 +3 2 += " +Ġl ost +Ġbec ame +Ġgrou ps +ĠM us +Ġw rote +ĠP e +Ġpro p +j oy +à © +ĠWh ite +Ġde ad +. ' +Ġhtt p +Ġwe bs +O S +Ġins ide +Ġwr ong +Ġstat ement +Ġ ... 
+y l +Ġfil m +Ġmus ic +Ġsh are +ific ation +Ġre lease +Ġfor ward +Ġst ay +Ġcomp ut +it te +s er +Ġorig inal +Ġc ard +Ġc and +Ġd iv +at ural +Ġfav or +O M +Ġc ases +us es +Ġse ction +Ġle ave +g ing +ov ed +ĠW ashington +3 9 +ĠG l +Ġrequ ired +act ion +ap an +o or +it er +ĠK ing +Ġcount ries +ĠG erman +ll ing +Ġ2 7 +3 4 +Ġquest ions +Ġpr im +Ġc ell +Ġsh oot +Ġany one +ĠW est +Ġaff ect +ep end +Ġon line +ĠIs rael +ĠSept ember +Ġab ility +Ġcont ent +is es +Ġre ve +Ġl aun +Ġind ic +Ġfor ce +c ast +Ġso ld +av ing +f l +Ġso ft +Ġcompan ies +ce ed +Ġart icle +Ġa ud +Ġre v +Ġed uc +Ġplay ing +0 5 +Ġhe ld +ct or +Ġrele ased +Ġf ederal +3 7 +Ġad minist +Ġinter view +Ġinst all +Ġrece ived +Ġs ource +u k +P h +Ġser ious +Ġcre ated +Ġc ause +Ġim medi +Ġdef in +u el +ĠDep artment +ct ions +ĠC our +ĠN ow +z e +it es +it ution +Ġl ate +Ġspe ak +n ers +Ġleg al +ar i +ĠC or +Ġwe eks +Ġmod el +Ġp red +Ġex act +B C +ĠB y +IN G +os ing +Ġt akes +Ġreg ard +Ġopp ortun +Ġpr ice +Ġ19 8 +ĠA pr +f ully +Ġor d +Ġproble ms +ru ction +h am +ĠC ount +le ge +Ġlead ers +E T +le v +Ġde ep +olog ical +es e +h aps +ĠS ome +Ġp ers +Ġcont ract +Ġrelations hip +s p +ou d +Ġb ase +4 8 +m it +A d +anc ial +Ġcons um +Ġpot ential +Ġl angu +re m +et h +Ġrel ig +ress ed +6 6 +Ġl ink +Ġl ower +ay er +ĠJ une +Ġf em +un t +er c +ur d +Ġcont act +Ġ ill +Ġm other +Ġest ab +h tt +ĠM arch +ĠB ro +ĠCh ina +Ġ2 9 +Ġs qu +Ġprov ided +Ġa verage +as ons +Ġ201 1 +Ġex am +l in +5 5 +n ed +Ġper fect +Ġt ou +al se +u x +Ġbu y +Ġsh ot +Ġcol lect +Ġph ot +Ġplay ed +Ġsur pr +Ġofficial s +Ġsim ple +av y +Ġindust ry +Ġhand s +g round +Ġp ull +Ġr ound +Ġus er +Ġr ange +u ary +Ġpriv ate +op s +e es +Ġw ays +ĠM ich +Ġve h +Ġex cept +Ġter ms +im um +pp er +I ON +ore s +ĠDr agon +ou l +Ġd en +Ġperform ance +Ġb ill +c il +4 7 +Ġen vironment +Ġex c +ad d +Ġwor th +Ġp ict +Ġch ance +Ġ201 8 +b or +Ġspe ed +ict ion +Ġal leg +ĠJ apan +at ory +re et +Ġm atch +ĠI I +Ġst ru +ord er +Ġst e +Ġl iving +Ġst ruct +in o +Ġse par +her n +Ġresp onse +Ġen joy +Ġv ia +A D +um ents +ace book +Ġmem ber +ib r +iz ing +Ġto ol +ĠM on +ĠWh ile +h ood +ĠA ng +ĠD ef +Ġoff er +T r +a ur +Ġturn ed +ĠJ uly +d own +an ced +Ġrec ently +ĠE ar +Ġc e +ĠSt ar +ĠC ong +rough t +Ġbl ood +Ġhop e +Ġcom ment +ain t +Ġar ri +il es +Ġpartic ip +ough t +ri ption +0 8 +4 9 +Ġg ave +Ġse lect +Ġkill ed +sy ch +Ġgo es +i j +Ġc oll +Ġimp act +at ives +ĠS er +0 9 +ĠAug ust +Ġb oy +d e +ĠD es +Ġf elt +U S +Ġexpect ed +Ġim age +ĠM ark +cc ording +o ice +E C +ĠM ag +en ed +h old +ĠP ost +Ġpre vent +N o +Ġinvol ved +Ġey es +Ġquick ly +A t +un k +Ġbeh av +Ġ ur +Ġl ed +c ome +e y +Ġcand id +Ġear lier +Ġfoc us +et y +P ro +led ge +ix ed +ill ed +Ġpop ular +A P +Ġset t +l ight +Ġvar ious +in ks +Ġlevel s +Ġro ad +ell ig +ab les +he l +itte e +ĠG ener +y pe +Ġhe ard +ic les +Ġm is +Ġus ers +ĠS an +Ġimpro ve +Ġf ather +Ġse arch +The y +v il +Ġprof ess +Ġkn ew +Ġl oss +Ġev ents +6 5 +Ġb illion +0 7 +0 2 +ĠNew s +ĠA M +Ġco ver +w here +ens ion +Ġb ott +Ġare as +en ces +op e +ĠTw itter +a el +Ġget s +ĠGo ogle +Ġs n +i ant +Ġv ote +Ġnear ly +Ġinclud ed +Ġrec ogn +z z +m m +al ed +Ġhappen ed +0 4 +Ġh ot +Ġwho se +Ġc ivil +Ġsu ff +o es +it iz +ĠSy ri +Ġresp ond +Ġh on +Ġfeat ures +Ġeconom ic +ĠApr il +r im +Ġtechn ology +Ġo ption +ag ing +Ġpur ch +R e +Ġl at +ch ie +is l +Ġrec omm +u f +Ġtr aining +Ġeffect s +Ġf ast +Ġ201 0 +Ġocc ur +Ġwebs ite +Ġem ail +Ġs ens +e ch +Ġo il +Ġinf lu +Ġcurrent ly +ĠS ch +ĠAd d +Ġgo al +Ġsc ient +Ġcon v +1 00 +em y +Ġdec ided +Ġtra vel +Ġm ention +L L +0 3 +Ġe lection +Ġph one +Ġlook s +Ġsit 
uation +Ġc y +Ġh or +b ed +ĠCour t +a ily +av es +Ġqu ality +ĠCom p +w ise +Ġt able +Ġst aff +ĠW ind +et t +Ġtri ed +ide red +Ġadd ition +Ġb ox +Ġl ack +ar ily +Ġw ide +Ġm id +Ġbo ard +ys is +Ġant i +h a +Ġd ig +en ing +Ġd ro +C on +6 8 +Ġsl ow +b ased +se qu +Ġp ath +E x +ak er +Ġwork ed +Ġp en +Ġeng ine +Ġlook ed +ĠSu per +ĠS erv +Ġvict im +U n +Ġproper ty +Ġint rodu +Ġexec ut +ĠP M +L e +Ġcol or +ĠM ore +Ġ6 0 +Ġnet work +Ġd ate +c ul +id ge +Ġext ra +3 1 +Ġs le +6 7 +Ġw ond +Ġreport s +j ust +ĠAust ral +Ġcap ital +Ġen s +Ġcomm and +Ġallow ed +Ġpre p +Ġca pt +h ib +Ġnum bers +ch an +Ġf air +m p +om s +Ġre ach +W ith +t ain +Ġbro ad +Ġcou ple +ec ause +ly ing +ĠF eb +Ġsc reen +Ġl ives +Ġpri or +ĠCong ress +A r +Ġappro ach +Ġe mer +ar ies +ĠD is +s erv +ĠN e +Ġbu ilt +c ies +Ġre pe +Ġrul es +for ce +ĠP al +Ġfin ancial +Ġcons idered +ĠCh ar +n ces +ĠI S +Ġb rought +Ġb i +i ers +ĠS im +O P +Ġproduct s +Ġvis it +Ġdoc ument +Ġcon duct +Ġcomplete ly +in ing +ĠCal if +ib ly +Ġwr itten +ĠT V +em ents +Ġd raw +O ne +Ġpub lished +Ġsec ret +r ain +he t +ĠF acebook +ond ay +ĠU p +Ġsex ual +Ġth ous +ĠP at +Ġ ess +Ġstand ard +Ġar m +g es +ect ion +Ġf ell +Ġfore ign +an i +ĠFr iday +Ġreg ular +in ary +Ġincre ased +Ġus ually +Ġdem on +Ġd ark +Ġadd itional +ro l +ĠO f +Ġprodu ction +! ! +und red +Ġintern ational +id ents +ĠF ree +rou p +Ġr ace +Ġm ach +Ġh uge +A ll +le ar +ove mber +Ġto wn +Ġatt ention +ĠO ff +y ond +ĠThe n +f ield +Ġter ror +ra z +ĠB o +Ġmeet ing +ĠP ark +Ġar rest +Ġf ear +Ġa w +ĠV al +or ing +' , +Ġext reme +ar r +Ġwork ers +A fter +Ġ3 1 +n et +am ent +Ġdirect ly +Ġpop ulation +ub e +ĠOct ober +ĠI N +ĠJan uary +5 9 +ĠDav id +Ġc ross +ce mber +ĠF irst +Ġmess age +ir it +Ġn ation +Ġp oll +is ions +Ġansw er +n y +is ode +Ġcar ry +ĠRuss ia +Ġhe ar +eng th +ro y +Ġn atural +in ally +Ġdo g +m itted +Ġtr ade +Ġsub st +Ġmult iple +ĠAf ric +Ġf ans +Ġs ort +Ġgl obal +ic ation +ĠW ed +ar a +Ġa chie +Ġlangu age +ve y +Ġt al +Ġnecess ary +Ġdet ails +Ġs en +ĠS und +ĠRe g +ĠR ec +0 6 +Ġs il +ress ive +Ġmed ical +un ch +orn ia +Ġu nd +f ort +oc ks +ĠM onday +ues day +c raft +7 7 +ur t +Ġ ver +ĠH ill +Ġrece ive +Ġmor ning +es tern +Ġb ank +Ġs at +ir th +ĠH igh +Ġdev ice +ĠTH E +ĠCent er +Ġsaf e +Ġp le +ĠCanad a +Ġsystem s +Ġass ist +Ġsur v +Ġb attle +ĠS oc +vert is +S he +Ġp aper +Ġgrow th +Ġc ast +S c +Ġpl ans +ll ed +Ġpart s +Ġw all +Ġmove ment +Ġpract ice +im ately +Ġdis play +Ġsomet imes +om p +ĠP aul +ĠY es +k ing +5 8 +o ly +Ġs on +Ġav oid +ok es +ĠJ ew +Ġto wards +as c +Ġ // +ĠK ore +Ġtalk ing +Ġcor rect +Ġsp ent +ic ks +i able +e ared +Ġter m +Ġwant s +om ing +Ġ ut +Ġdou b +Ġfor ces +Ġp lease +6 9 +ĠN ovember +at form +ond on +Ġon es +Ġimmedi ately +ĠRuss ian +ĠM et +Ġde g +Ġparent s +C H +ĠAmeric ans +al y +ĠM od +Ġsh own +Ġcond itions +Ġst uff +Ġre b +ĠY our +Ġinclud es +n own +ĠS am +Ġexper ien +m ission +ĠE ven +augh t +Ġannoun ced +ĠRepublic an +Ġdeter min +Ġdescrib ed +ĠCount y +( ) +Ġdo or +Ġchang ed +Ġne igh +ĠH ere +Ġcle an +Ġp an +ĠDe cember +ĠEurope an +ir ing +ap ter +Ġcl ub +ĠT uesday +Ġp aid +ĠN et +Ġattack s +Ġcharact ers +Ġal one +Ġdirect or +d om +Ġ3 5 +Ġl oad +Ġr out +ĠCalif ornia +Ġfin ally +Ġr ac +Ġcont r +Ġexact ly +res h +p ri +ĠIs lam +Ġn ature +Ġcare er +Ġlat est +Ġcon vers +ĠS l +p ose +ci ent +ĠIn c +iv ity +8 8 +ĠA tt +ĠM or +nes day +Ġwe ight +k en +Ġnot e +Ġteam s +Ġ \ +air s +ĠG reen +Ġh undred +on ent +Ġstre ng +Ġcons ist +ic ated +Ġreg ul +Ġl ic +ast ic +Ġt en +urs day +ellig ence +ous ly +ĠU K +B I +Ġcost s +Ġind epend +ĠA P +Ġnorm al +Ġh om +Ġob vious +Ġs we 
+Ġst ar +Ġread y +ac her +Ġimp lement +g est +Ġs ong +ĠG et +ĠL ab +Ġinterest ing +us ing +Ġg iving +ĠSund ay +Ġet c +Ġm iddle +Ġrem ember +r ight +os ition +ut ions +Ġm ax +4 6 +Ġyour self +Ġdem and +Ġtreat ment +Ġd anger +ĠC ons +Ġgu y +ĠBrit ish +Ġphys ical +Ġrel ated +Ġrem ain +Ġcould n +Ġref er +Ġc itiz +b ox +EN T +bo ard +Ġin n +I G +er o +ĠSt reet +osp ital +ren ch +cher s +Ġst ra +O L +ag er +ĠA N +Ġeas ily +I A +en ge +in y +Ġcl os +ock ed +Ġus es +ĠC oun +I m +u ild +? ? +m ore +Ġan g +Ġwr ite +ol ute +5 7 +Ġlead er +Ġread ing +< / +Ġaut om +est s +4 3 +Ġleg isl +ĠG old +Ġdesign ed +ĠS T +ĠLe g +a res +Ġbe aut +ĠT ex +Ġappear s +Ġstru gg +ĠR om +Ġ 00 +Ġcho ice +Ġparticular ly +ĠF rom +op er +ĠL ondon +ann ed +Ġallow s +ob ile +Ġdiffere nce +âĢ ¢ +ĠV iew +ĠWed nesday +Ġal though +Ġrel ative +Ġapplic ation +ate ver +Ġare n +Ġmy self +Ġim ag +Ġdis e +Ġsoc iety +Ġfre qu +ĠEng lish +Ġpo or +ĠD ay +Ġwrit ing +Ġse ven +Ġstart ing +Ġb ud +Ġpr int +ĠTr ans +uf act +ĠSt ud +n ew +Ġcr im +Ġg ives +Ġco ol +a e +i ance +ĠGener al +Ġthink ing +Ġsa ve +Ġlim ited +ĠPart y +Ġmean ing +p en +ow ers +ĠJ ack +E M +Ġn ice +ru pt +Ġg as +Ġe ight +Ġfe et +Ġeff ort +Ġ ign +ic it +B l +co in +Ġop in +Ġbr ain +Wh ile +he st +ĠTh ursday +Ġwould n +augh ter +Ġtou ch +le ments +Ġstud ies +Ġcent er +c ont +or ge +Ġcomput er +Ġinvestig ation +P l +or ks +Ġ200 8 +Ġincre asing +Ġst ore +Ġcom ments +Ġb al +m en +Ġdo ll +Ġl iber +Ġw ife +Ġlaw s +atur day +it ness +Ġmod ern +ĠS k +Ġadminist ration +Ġopportun ity +Ġs al +Ġpower ful +M y +Ġclaim s +ĠEar th +ord s +Ġt itle +Ġes c +n ame +N ot +om en +Ġbe yond +Ġc amer +Ġse ll +it ute +ear ch +Ġapp l +im ent +4 2 +ĠAr t +Ġun f +Ġviol ence +ur g +ĠE ast +Ġcomp ared +Ġopt ions +Ġthrough out +Ġv s +ig r +. [ +ac hes +7 8 +Ġfil es +F L +E L +ar ian +ĠJ ames +ĠA ir +an ch +Ġdet ail +Ġpie ce +P S +Ġn amed +Ġeduc ation +Ġdri ve +Ġitem s +Ġstud ent +ic ed +: : +ic o +Ġth row +Ġsc ene +Ġcomple x +Ġ200 9 +Ġpre c +ĠB re +7 9 +Ġcon cept +Ġstat us +am ing +Ġd ied +Ġknow ledge +Ġbegin ning +O D +ru ary +Ġcertain ly +Ġgu ys +Ġsl ight +in n +ound s +Ġf ine +Ġf at +ic ations +Ġper haps +ĠA nt +Ġinc ome +Ġhtt ps +Ġmajor ity +port s +st on +Ġgreat er +Ġfe ed +ent ially +Ġsaf ety +Ġun ique +and om +Ġg one +Ġshow ed +Ġhist or +Ġcoun ter +i us +id a +Ġlead ing +i pe +Ġs end +ĠDon ald +er ve +Ġdef ense +ines e +Ġy es +ĠF ire +ĠMus lim +ra q +Ġcontin ued +os h +Ġprov ides +Ġpr ison +ĠP re +Ġhapp y +Ġeconom y +Ġtr ust +ag s +ĠG ame +Ġweap ons +um an +ĠC le +it ation +Ġanal ysis +ĠT imes +Ġsc ience +- > +Ġfig ure +Ġdis app +ent y +Ġsoft ware +Ġu lt +Ġoffic ers +N ew +I s +Ġrem ains +ĠInd ia +Ġp sych +ri ef +Ġc at +es c +Ġob serv +Ġst age +ĠD ark +Ġent er +ch ange +Ġpass ed +Ġdes pite +ĠO ut +Ġmov ie +r s +Ġv oice +m ine +ĠPl ay +Ġto ward +ĠT er +Ġreg ion +Ġval ues +or ters +Ġm ount +Ġoffic er +ĠO ther +b an +Ġh ous +w ood +ro om +I V +ĠS un +se e +ĠO ver +ro g +9 0 +Ġl ay +ĠT ur +a wn +Ġpress ure +ĠS ub +Ġbook s +ed om +ĠS and +A A +ag o +Ġre asons +f ord +Ġactiv ity +U T +N ow +ĠSen ate +ce ll +n ight +Ġcall s +in ter +Ġlet ter +ĠR ob +ĠJ e +Ġcho ose +ĠL aw +G et +B e +Ġro b +Ġtyp es +Ġpl atform +Ġqu arter +R A +ĠT ime +Ġmay be +ĠC r +9 5 +p re +Ġmov ing +Ġl if +Ġgo ld +Ġs om +Ġpat ients +Ġtr uth +ĠK e +ur ance +ant ly +m ar +Ġchar ge +ĠG reat +Ġce le +---------------- ---------------- +Ġro ck +ro id +an cy +Ġcred it +a ud +B y +ĠE very +Ġmov ed +ing er +rib ution +Ġn ames +Ġstra ight +ĠHe alth +ĠW ell +Ġfe ature +Ġr ule +Ġsc he +in ated +ĠMich ael +ber g +4 1 +il ed +b and +Ġcl ick +ĠAng el 
+on ents +Â Ń +ĠI raq +ĠS aturday +Ġa ware +p art +Ġpat tern +O W +ĠL et +Ġgr ad +ign ed +Ġassoci ated +Ġst yle +n o +i ation +a ith +il ies +Ġst ories +ur ation +Ġindividual s +ĠâĢ ¦ +m iss +ĠAss oci +ish ing +ab y +Ġsum mer +ĠB en +Ġ3 2 +Ġar ch +ut y +ĠTex as +h ol +Ġfull y +Ġm ill +Ġfollow ed +ĠB ill +ĠInd ian +ĠSec ret +ĠB el +ĠFeb ruary +Ġjob s +Ġseem ed +ĠGo vern +i pped +Ġreal ity +Ġl ines +Ġp ark +Ġmeas ure +ĠO ur +I M +Ġbro ther +Ġgrow ing +Ġb an +Ġest im +Ġc ry +ĠS chool +Ġme chan +ĠO F +ĠWind ows +Ġr ates +ĠO h +Ġpos itive +Ġcult ure +ist ics +ic a +Ġh ar +y a +ite ly +i pp +Ġm ap +en cies +ĠWill iam +I I +ak ers +5 6 +ĠM art +ĠR em +Ġal tern +it ude +Ġco ach +row d +D on +Ġk ids +Ġj ournal +Ġcor por +Ġf alse +Ġwe b +Ġsle ep +Ġcont ain +Ġst o +Ġb ed +iver se +ĠR ich +ĠCh inese +Ġp un +Ġme ant +k nown +Ġnot ice +Ġfavor ite +a ven +Ġcond ition +Ġpur pose +) ) +Ġorgan ization +Ġchall eng +Ġman ufact +Ġsus p +ĠA c +Ġcrit ic +un es +uc lear +Ġm er +vent ion +Ġ8 0 +Ġm ist +ĠU s +ĠT or +htt p +ol f +Ġlarg er +Ġadv ant +Ġrese ar +Ġact ions +m l +Ġke pt +Ġa im +, ' +c ol +Ġbenef its +if ying +Ġact ual +ĠIntern ational +Ġveh icle +Ġch ief +Ġeff orts +ĠLe ague +ĠM ost +Ġwa it +Ġad ult +Ġover all +Ġspe ech +Ġhigh ly +Ġfem ale +Ġer ror +Ġeffect ive +5 4 +Ġenc our +w ell +Ġfail ed +Ġcons erv +Ġprogram s +Ġt rou +Ġa head +5 00 +vertis ement +I P +ĠF ound +p ir +Ġ % +Ġcr ime +and er +Ġloc ation +ĠI ran +Ġbehav ior +az ing +Ġr are +Ġem b +Ġca used +Ġsh ip +Ġact ive +Ġcont ribut +Ġg reen +Ġac qu +Ġref lect +ven ue +Ġf irm +Ġb irth +] . +Ġclear ly +Ġem ot +Ġag ency +ri age +Ġmem ory +9 8 +S A +ĠSe e +ac ing +C C +Ġbig gest +Ġr ap +Ġbas ic +Ġb and +e at +Ġsus pect +ĠM ac +Ġ9 0 +m ark +ist an +Ġsp read +am s +k i +as y +ra v +ĠR ober +Ġdemon str +r ated +Ġabs olute +Ġpl aces +Ġim pl +ibr ary +Ġc ards +Ġdest roy +Ġv irt +ve re +Ġapp eared +y an +p oint +Ġbe g +Ġtem per +s pe +ant ed +ear s +ĠD irect +Ġl ength +Ġbl og +am b +Ġint eg +Ġres ources +ac c +if ul +Ġsp ot +Ġfor ced +Ġthous ands +ĠMin ister +Ġqu al +ĠF rench +at ically +Ġgener ally +Ġdr ink +Ġth us +I L +od es +Ġappro pri +ĠRe ad +Ġwh om +Ġey e +Ġcol lege +Ġ4 5 +ire ction +Ġens ure +Ġapp arent +id ers +Ġrelig ious +Ġmin or +ol ic +Ġt ro +ĠWh y +rib ute +m et +Ġprim ary +Ġdevelop ed +Ġpe ace +Ġsk in +st e +av a +Ġbl ue +Ġfam ilies +Ġ ir +Ġapp ly +Ġin form +ĠSm ith +C T +i i +Ġlim it +Ġres ist +........ ........ 
+um n +Ġconf lic +Ġtw e +ud d +ĠT om +Ġl iter +qu e +b on +Ġha ir +Ġevent ually +Ġp us +Ġhelp ed +Ġag g +or ney +ĠApp le +Ġf it +ĠS ur +Ġpre m +Ġs ales +Ġsecond s +Ġstreng th +Ġfeel ing +¿ ½ +Ġt our +Ġknow s +o om +Ġex erc +Ġsom ew +ï ¿½ +> > +Ġsp okes +Ġide as +Ġreg ist +so ft +ĠD el +ĠP C +Ġpro pos +Ġlaun ch +Ġbott om +T H +ĠP lease +v est +it z +ĠIn ter +Ġsc ript +Ġr at +ar ning +Ġ il +ĠJ er +ĠA re +Ġwh atever +ok en +ci ence +Ġmod e +Ġag ree +Ġs ources +Ġinit ial +Ġrest rict +Ġwond er +us ion +## ## +ĠS il +vil le +Ġb urn +t w +as ion +Ġ £ +Ġn or +u ing +Ġre ached +Ġs un +Ġc ateg +ig ration +Ġc ook +Ġprom ot +Ġm ale +Ġcl imate +Ġf ix +Ġalleg ed +U R +all ed +Ġim ages +C ont +ot a +Ġschool s +i os +Ġd rop +Ġst ream +ĠM o +Ġprevious ly +al ing +Ġp et +Ġdou ble +Ġ( @ +ann el +Ġdef ault +t ies +Ġr ank +ĠD ec +ĠCoun cil +Ġweap on +Ġst ock +Ġanal y +ĠSt r +Ġpict ure +ĠPol ice +f erence +Ġcent ury +Ġcitiz ens +Ġon to +Ġexp and +Ġhe ro +ĠS ol +Ġw ild +Ġupd ate +Ġcustom ers +r ont +d ef +Ġl ik +Ġcrim inal +ĠChrist ian +S P +7 6 +Ġle aving +Ġother wise +ĠD ist +Ġbas is +5 2 +5 3 +ic ip +ĠB er +Ġrecomm end +Ġfl oor +Ġc rowd +ol es +Ġ7 0 +Ġcent ral +ĠE v +Ġd ream +Ġdown load +Ġconf ir +ĠTh om +Ġwind ow +Ġhapp ens +Ġun it +Ġt end +Ġs pl +Ġbec omes +Ġfight ing +Ġpred ict +ĠP ress +ĠP ower +Ġhe avy +ak ed +Ġf an +or ter +ate gy +B A +iz es +Ġsp end +H ere +Ġ200 7 +Ġad op +ĠH am +Ġfoot ball +ĠP ort +od ay +5 1 +amp ions +Ġtrans fer +h t +Ġ3 8 +ter m +ac ity +Ġb ur +] , +tern al +r ig +b ut +Ġthere fore +ĠB ecause +res p +re y +Ġm ission +S ome +Ġnot ed +Ġass um +Ġdise ase +Ġed it +Ġprog ress +r d +ĠB rown +oc al +Ġadd ing +Ġra ised +ĠAn y +Ġt ick +Ġsee ing +ĠPe ople +Ġagre ement +Ġser ver +Ġw at +Ġdeb ate +Ġsupp osed +il ing +Ġlarg est +Ġsuccess ful +ĠP ri +ĠDemocr atic +Ġj ump +ĠSyri a +Ġown ers +Ġoff ers +Ġshoot ing +Ġeff ic +se y +Ġha ven +ver se +te red +ĠL ight +im al +ĠB ig +Ġdef end +Ġbe at +Ġrecord s +% ) +Ġsc en +Ġemploy ees +Ġdev ices +he m +Ġcom mer +ĠM ex +Ġbenef it +ĠPro f +Ġil leg +Ġsur face +ĠAl so +Ġh arm +ing ly +w ide +ĠA lex +Ġsh ut +ĠC ur +Ġl ose +p m +Ġchall enge +se mb +Ġst ation +Ġint elligence +Ġacc ur +ĠFl or +Ġrequ ires +ĠM al +b um +Ġh ospital +Ġsp irit +Ġoff ered +Ġprodu ce +ĠComm un +Ġcreat ing +Ġcr is +s pect +Ġend ed +Ġd aily +Ġvot ers +land s +i as +i h +on a +Ġsm art +ĠOff ice +ĠL ord +ri al +ĠIntern et +Ġcirc um +Ġextreme ly +' . 
+Ġopin ion +ĠM il +Ġg ain +B S +ĠF in +y p +Ġuse ful +Ġbud get +Ġcom fort +is f +Ġback ground +el ine +Ġep isode +Ġen emy +Ġtri al +Ġestab lish +d ate +ĠC ap +Ġcontin ues +Ġshow ing +ĠUn ion +w ith +Ġpost ed +ĠSy stem +Ġe at +ri an +Ġr ise +ĠGerman y +il s +Ġsign ed +Ġv ill +Ġgr and +m or +ĠEng land +Ġproject s +um ber +Ġconf erence +z a +Ġrespons ible +ĠAr ab +Ġlearn ed +âĢĶ âĢĶ +i pping +ĠGe orge +O C +Ġreturn ed +ĠAustral ia +Ġb rief +Q u +Ġbr and +ill ing +ab led +Ġhig hest +Ġtr ain +ĠComm ission +wh ile +Ġn om +cept ion +Ġm ut +ĠBl ue +Ġinc ident +v ant +8 6 +ĠI D +Ġn uclear +7 4 +ĠL ike +ĠR E +ĠM icro +l i +m ail +Ġcharg es +8 9 +Ġad just +ad o +Ġear th +N A +Ġpr ices +P A +Ġd raft +Ġrun s +Ġcandid ate +ens es +Ġmanag ement +ĠPh il +ĠM iss +Ġte ach +g ram +Ġunderstand ing +a it +ic ago +A dd +ĠE p +sec ut +Ġsepar ate +Ġinst ance +Ġe th +Ġun less +**** **** +ĠF ore +in ate +Ġoper ations +S p +Ġf aith +g ar +ĠCh urch +ron ic +Ġconf ig +os ure +Ġactiv ities +Ġtrad itional +Ġ3 6 +Ġd irection +Ġmach ine +Ġsur round +Ġp ush +un ction +ĠE U +Ġeas ier +Ġarg ument +G B +Ġm icro +Ġsp ending +iz ations +Ġthe ory +ad ow +Ġcall ing +ĠL ast +Ġd er +Ġinflu ence +Ġcomm it +Ġph oto +Ġun c +ist ry +g n +ast e +ack s +Ġdis p +ad y +d o +ĠG ood +Ġ ` +Ġw ish +Ġreve aled +Âł Âł +l ig +Ġen force +ĠComm ittee +Ġche m +Ġmil es +Ġinterest ed +Ġsol ution +ic y +in ct +Ġ- > +ĠD et +Ġrem oved +Ġcomp ar +e ah +Ġpl ant +ĠS ince +Ġachie ve +Ġadvant age +Ġslight ly +b ing +Ġpl aced +u nder +201 5 +ĠM ad +Ġt im +os es +Ġc ru +ĠR ock +Ġmost ly +Ġneg ative +Ġset ting +Ġprodu ced +Ġm ur +Ġconnect ion +ĠM er +Ġdri ver +Ġexecut ive +Ġass ault +Ġb orn +ĠV er +t ained +Ġstruct ure +Ġredu ce +Ġdec ades +Ġd ed +u ke +ĠM any +idd en +Ġle ague +S e +Ġjo in +Ġdis co +Ġd ie +c ks +act ions +Ġass ess +ag n +Ġgo als +our s +I R +Ġsen ior +ill er +m od +ip ment +oc ol +u y +ĠQ ue +Ġpart ies +ir gin +Ġle arning +it able +Ġstre et +Ġcamer a +A pp +Ġsk ills +b re +c ious +Ġcele br +ĠFr anc +Ġexist ing +Ġwill ing +l or +Ġ id +ĠSp ace +Ġcrit ical +ĠL a +ortun ately +Ġser ve +Ġc old +Ġspec ies +T S +Ġanim als +ĠB ay +Ġold er +ĠU nder +est ic +ĠT re +Ġte acher +Ġpre fer +v is +Ġth read +ĠM att +Ġmanag er +ãĥ » +Ġprofess ional +ĠV ol +Ġnot es +The se +ul a +Ġf resh +ent ed +u zz +ed y +clus ion +ĠR el +Ġdoub t +E O +Ġopen ed +ĠB it +Ad vertisement +Ġgu ess +ĠU N +Ġse qu +Ġexpl ain +ott en +Ġatt ract +ak s +Ġstr ing +Ġcont ext +oss ible +ĠRepublic ans +Ġsol id +Ġc ities +Ġask ing +Ġr andom +u ps +ur ies +ar ant +dd en +g l +ĠFlor ida +Ġdep end +ĠSc ott +Ġ3 3 +Ġi T +ic on +Ġmention ed +Ġ2 000 +Ġclaim ed +Ġdefin itely +ul f +Ġc ore +Ġopen ing +ĠCon st +wh ich +ĠT ra +A G +7 2 +Ġbelie ved +ad a +Ġ4 8 +ĠSec urity +yr ight +ĠP et +ĠL ou +Ġhold ing +======== ======== +Ġ ice +Ġb row +Ġauthor ities +h ost +w ord +Ġsc ore +ĠD iv +Ġcell s +Ġtrans l +Ġneigh bor +Ġrem ove +u ct +Ġdist rict +ĠA ccording +Ġwor se +Ġconcern s +Ġpresident ial +Ġpolic ies +ĠH all +7 3 +Ġh us +A Y +Ġ200 6 +ĠJ ud +Ġindepend ent +ĠJust ice +ili ar +pr int +igh ter +Ġprotect ion +z en +Ġsu dden +h ouse +ĠJ es +P R +ĠIn f +Ġb ul +Ġ _ +ĠServ ice +ĠP R +Ġstr ategy +ff ect +Ġgirl s +Ġmiss ing +oy al +ĠTe am +ul ated +Ġd at +Ġpolit ics +ab or +A ccording +Ġspe ll +Ġg raph +ort hern +T C +A b +Ġlab or +is her +Ġk ick +ĠiT unes +Ġstep s +pos es +Ġsmall er +E n +ber t +Ġro ll +Ġresear chers +Ġcl osed +Ġtrans port +Ġlaw y +________ ________ +ĠCh icago +Ġas pect +Ġn one +Ġmar riage +9 6 +Ġe lements +ĠF re +ĠS al +Ġd ram +F C +t op +e qu +Ġhe aring +Ġsupport ed +Ġtest ing +co hol +Ġmass 
ive +Ġst ick +Ġgu ard +is co +ph one +F rom +How ever +Ġb order +Ġcop y +ograph y +l ist +7 1 +Ġown er +cl ass +ru it +r ate +ĠO nce +Ġdig ital +Ġt ask +ER S +Ġinc red +t es ++ + +ĠFr ance +Ġb reat +ow l +Ġiss ued +ĠW estern +Ġdet ect +Ġpart ners +Ġsh ared +ĠC all +Ġcan cer +ac he +rib e +Ġexpl ained +Ġhe at +{ " +Ġinvest ment +ĠB ook +Ġw ood +Ġtool s +ĠAl though +Ġbelie f +Ġcris is +Ġg e +ĠM P +Ġoper ation +ty pe +~ ~ +g a +Ġcont ains +ant a +Ġexp ress +ĠG roup +ĠJ ournal +k a +Ġam b +ĠUS A +Ġfind ing +Ġfund ing +h ow +Ġestab lished +ide os +Ġdeg ree +Ġdanger ous +ang ing +Ġfre edom +pp ort +out hern +Ġch urch +Ġc atch +ĠTw o +Ġpres ence +ĠGu ard +U p +Ġauthor ity +ĠPro ject +Ġbut ton +Ġcon sequ +Ġval id +Ġwe ak +Ġstart s +Ġref erence +ĠM em +" ) +U N +or age +ĠO pen +Ġcol lection +y m +g ency +Ġbeaut iful +ro s +Ġtell s +Ġwa iting +n el +Ġprov iding +ĠDemocr ats +Ġd aughter +Ġm aster +Ġpur poses +ĠJapan ese +Ġequ al +Ġturn s +Ġdoc uments +Ġwatch ing +R es +Ġr an +201 4 +Ġre ject +ĠKore a +Ġvictim s +Le vel +ere nces +Ġw itness +Ġ3 4 +Ġre form +com ing +Ġocc up +Ġc aught +Ġtra ffic +ad ing +Ġmod els +ar io +Ġserv ed +Ġb atter +u ate +ĠSecret ary +Ġagre ed +Ġtr uly +yn am +ĠR et +Ġun its +ĠRes earch +h and +az ine +ĠM ike +Ġvar iety +ot al +Ġam azing +Ġconfir med +Ġentire ly +Ġpurch ase +Ġe lement +Ġc ash +Ġdeter mine +D e +Ġc ars +ĠW all +â ĸ +Ġview s +Ġdrug s +Ġdep artment +ĠSt ep +u it +Ġ3 9 +as ure +ĠCl ass +Ġc overed +ĠB ank +Ġme re +u ana +Ġmult i +Ġm ix +Ġun like +lev ision +Ġsto pped +Ġs em +ĠG al +ul es +Ġwe l +ĠJohn son +l a +Ġsk ill +Ġbec oming +ri e +Ġappropri ate +f e +ell ow +ĠPro t +ul ate +oc ation +Ġweek end +od ies +Ġsit es +Ġanim al +ĠT im +Ġsc ale +Ġcharg ed +Ġinst ruct +ill a +Ġmethod s +Ġc ert +Ġjud ge +ĠH el +Ġdoll ars +Ġstand ing +ĠS qu +Ġdeb t +l iam +Ġdri ving +ĠS um +ĠEd ition +Ġal bum +and on +I F +ĠU k +6 3 +ad er +Ġcommer cial +es h +ĠGovern ment +Ġdisc overed +Ġout put +ĠHill ary +ĠCar ol +Ġ200 5 +Ġab use +anc ing +Ġsw itch +Ġann ual +T w +Ġst ated +ag ement +in ner +Ġdem ocr +Ġres idents +Ġallow ing +Ġfact ors +od d +Ġf uck +em ies +Ġoccur red +ot i +Ġn orth +ĠP ublic +Ġinj ury +Ġins urance +C L +oll y +ã Ģ +Ġrepe ated +Ġar ms +ang ed +Ġconst ruction +Ġf le +P U +ic ians +Ġfor ms +ĠMc C +ant ic +Ġm ental +p ire +Ġequ ipment +Ġf ant +Ġdiscuss ion +Ġregard ing +k in +ar p +Ġch air +og ue +Ġpro ceed +ĠI d +O ur +Ġmur der +M an +Ġ4 9 +as p +Ġsupp ly +Ġin put +Ġwe alth +liam ent +Ġpro ced +or ial +ĠSt at +ĠN FL +hen s +ĠInst itute +Ġput ting +ourn ament +et ic +Ġloc ated +Ġk id +er ia +r un +Ġpr inc +Ġ ! 
+go ing +ĠB et +Ġcl ot +Ġtell ing +Ġprop osed +i ot +or ry +Ġfund s +g ment +ĠL ife +Ġb aby +ĠB ack +Ġsp oke +Im age +Ġear n +ĠA T +g u +Ġex change +ĠL in +ov ing +Ġp air +M ore +az on +Ġarrest ed +Ġkill ing +c an +ĠC ard +y d +Ġident ified +Ġm obile +Ġthan ks +ony m +ĠF orm +Ġhundred s +ĠCh ris +ĠC at +Ġtre nd +h at +ĠA v +om an +Ġelect ric +ĠW il +S E +O f +Ġrest aur +ot ed +Ġtr ig +Ġn ine +Ġb omb +Wh y + ¯ +Ġco verage +Ġapp eal +ĠRober t +ĠS up +Ġfin ished +Ġfl ow +Ġdel iver +Ġcal cul +Ġphot os +Ġph il +Ġpie ces +Ġapp re +k es +Ġr ough +D o +Ġpart ner +Ġconcern ed +Ġ3 7 +ĠG en +C ol +ct ors +Ġ= > +st ate +Ġsuggest ed +ĠFor ce +C E +Ġher self +ĠPl an +w orks +o oth +ren cy +Ġcor ner +Ġhus band +Ġintern et +ĠA ut +em s +os en +ĠAt l +g en +Ġbal ance +6 2 +Ġsound s +te xt +Ġar r +ov es +Ġmill ions +Ġrad io +Ġsat isf +ĠD am +M r +G o +S pe +Ġcomb at +r ant +ĠG ree +Ġf uel +Ġdist ance +Ġtest s +Ġdec re +ĠE r +Ġman aged +D S +Ġt it +Ġmeas ures +ĠL iber +Ġatt end +as hed +ĠJ ose +ĠN ight +d it +ĠN ov +ĠE nd +out s +Ġgener ation +Ġadv oc +y th +Ġconvers ation +ĠS ky +act ive +ce l +ri er +ĠFr ank +Ġg ender +Ġcon cent +Ġcar ried +and a +ĠV irgin +Ġarri ved +ic ide +ad ed +Ġfail ure +Ġmin imum +le ts +Ġwor st +Ġkeep ing +Ġint ended +Ġilleg al +Ġsub sc +Ġdetermin ed +Ġtri p +Y es +Ġra ise +Ġ ~ +Ġfeel s +Ġpack age +ĠJ o +h i +201 6 +re al +Ġf ra +Ġsy mb +M e +uck y +p ret +ĠK h +ĠEd it +ĠWe b +em ic +ĠCol or +Ġjust ice +I nt +Ġfar m +ck now +" > +el ess +Ġredu ced +Ġ5 00 +x x +ĠR ad +ĠW ood +Ġcl in +Ġhy p +il er +ur a +k ins +8 5 +6 1 +ĠThe ir +ĠM ary +Ġs an +Ġno vel +ĠWh o +Ġcap acity +Ġimp ossible +Ġpl ays +Ġmin ister +ij uana +ic ate +ĠS et +Ġf ram +Ġ ing +Ġcommun ities +ĠF BI +it a +Ġb on +Ġstr ateg +Ġinterest s +l ock +g ers +m as +ĠAN D +Ġconflic t +Ġrequire ments +Ġs ac +Ġoper ating +in i +rel ated +Ġcomm itted +Ġrelative ly +Ġs outh +¯ ¯ +Ġaff ord +Ġident ity +Ġdec isions +Ġacc used +pl ace +Ġvict ory +o ch +i at +N ame +C om +t ion +ed s +Ġsee k +Ġt ight +ĠIm ages +Ġinit i +Ġhum ans +Ġfam iliar +Ġaud ience +Ġintern al +vent ure +Ġs ides +ĠT O +Ġd im +Ġcon clud +Ġapp oint +Ġenforce ment +ĠJ im +ĠAssoci ation +Ġcircum st +ĠCanad ian +Ġjo ined +Ġdiffere nces +ĠL os +Ġprot est +Ġtw ice +w in +Ġgl ass +ars h +ĠAr my +Ġexp ression +Ġdec ide +Ġplan ning +an ia +Ġhand le +ĠMicro soft +ĠN or +Ġmax imum +ĠRe v +Ġse a +Ġev al +Ġhel ps +re f +Ġb ound +Ġm outh +Ġstand ards +Ġcl im +ĠC amp +ĠF ox +cl es +Ġar my +ĠTe chn +ack ing +x y +S S +Ġ4 2 +Ġbu g +ĠUk rain +ĠM ax +ĠJ ones +ĠSh ow +l o +Ġplan et +Ġ7 5 +Ġwin ning +Ġf aster +Ġspe ct +Ġbro ken +T R +Ġdef ined +Ġhealth y +Ġcompet ition +htt ps +ĠIs land +ĠF e +Ġannoun ce +ĠC up +ĠInst ead +Ġcl ient +Ġposs ibly +se ction +ock et +l ook +Ġfin ish +Ġcre w +Ġres erv +Ġed itor +Ġh ate +Ġs ale +Ġcontro vers +Ġp ages +w ing +Ġnum er +Ġopp osition +Ġ200 4 +Ġref uge +Ġfl ight +Ġap art +ĠL at +A meric +ĠAfric a +Ġapplic ations +ĠPal est +ĠB ur +Ġg ar +ĠSoc ial +Ġup gr +Ġsh ape +Ġspe aking +ans ion +a o +ĠS n +Ġwor ry +ĠBrit ain +P lease +rou d +Ġh un +Ġintrodu ced +Ġd iet +I nd +ĠSec ond +Ġfun ctions +ut s +ĠE ach +ĠJe ff +Ġst ress +Ġaccount s +Ġgu arant +ĠAn n +ed ia +Ġhon est +Ġt ree +ĠAfric an +ĠB ush +} , +Ġs ch +ĠOn ly +Ġf if +ig an +Ġexerc ise +ĠEx p +Ġscient ists +Ġlegisl ation +ĠW ork +ĠS pr +à Ĥ +ĠH uman +Ġ è +Ġsur vey +Ġr ich +ri p +Ġmain tain +Ġfl o +Ġleaders hip +st ream +ĠIslam ic +Ġ 01 +ĠCol lege +Ġmag ic +ĠPr ime +Ġfig ures +201 7 +ind er +x ual +ĠDe ad +Ġabsolute ly +Ġfour th +Ġpresent ed +resp ond +rib le +Ġal cohol +at o +ĠD E +por ary +Ġgr ab 
+Ġvar i +Ġqu ant +ĠPh oto +Ġpl us +r ick +ar ks +Ġaltern ative +Ġp il +Ġappro x +th at +Ġobject s +ĠR o +ĠAnd roid +Ġsignificant ly +ĠR oad +k ay +R ead +av or +Ġa cknow +ĠH D +ĠS ing +O r +ĠM ont +Ġun s +pro f +Ġneg oti +ĠAr ch +ik i +Ġte levision +ĠJew ish +Ġcomm ittee +Ġmot or +Ġappear ance +Ġs itting +Ġstri ke +ĠD own +com p +ĠH ist +Ġf old +ac ement +ĠLou is +Ġbel ong +ĠâĢ ¢ +Ġm ort +Ġprep ared +Ġ6 4 +ĠM aster +Ġind eed +ĠD en +Ġre nt +T A +our ney +ar c +S u +9 7 +Ġadv ice +Ġchang ing +Ġlist ed +Ġlaun ched +is ation +ĠP eter +is hes +Ġl ived +ĠM el +ĠSup reme +ĠF ederal +Ġ) ; +ruct ure +Ġset s +Ġphil os +u ous +Ġ ł +Ġappl ied +ĠN OT +Ġhous ing +ĠM ount +Ġo dd +Ġsu st +D A +ffic ient +Ġ ? +ol ved +Ġp owers +Ġth r +Ġrem aining +ĠW ater +L C +Ġca uses +ãģ ® +Ġman ner +ad s +Ġsuggest s +Ġend s +stand ing +f ig +ĠD un +id th +Ġg ay +Ġter min +ĠAngel es +M S +Ġscient ific +Ġco al +ap ers +b ar +ĠThom as +Ġsy m +ĠR un +th is +P C +igr ants +Ġmin ute +ĠDist rict +cell ent +Ġle aves +Ġcomple ted +am in +Ġfoc used +Ġmon itor +Ġveh icles +M A +ĠM ass +ĠGr and +Ġaffect ed +itution al +Ġconst ruct +Ġfollow s +Ġt on +re ens +Ġh omes +ĠE xt +ĠLe vel +r ast +ĠI r +Ġel im +Ġlarge ly +ĠJ oe +Ġvot es +all s +Ġbusiness es +ĠFound ation +ĠCent ral +Ġy ards +Ġmaterial s +ul ner +Ġgu ide +Ġclos er +um s +Ġsp orts +ed er +J ust +Ġtax es +8 4 +ĠO ld +Ġdec ade +ol a +Ġv ir +Ġdro pped +Ġdel ay +it ect +Ġsec ure +ste in +le vel +Ġtre ated +Ġfil ed +ain e +Ġv an +Ġm ir +Ġcol umn +ict ed +e per +Ġro t +Ġcons ult +Ġent ry +Ġmar ijuana +ĠD ou +Ġapparent ly +ok ing +clus ive +Ġincre ases +an o +Ġspecific ally +Ġte le +ens ions +Ġrelig ion +ab ilities +Ġfr ame +ĠN ote +ĠLe e +Ġhelp ing +Ġed ge +ost on +Ġorgan izations +à ĥ +ĠB oth +hip s +Ġbig ger +Ġbo ost +ĠSt and +Ġro w +ul s +ab ase +Ġr id +L et +are n +ra ve +Ġst ret +P D +Ġv ision +Ġwe aring +Ġappre ci +Ġa ward +ĠU se +Ġfact or +w ar +ul ations +) ( +Ġg od +Ġter rit +Ġpar am +ast s +8 7 +Ġen emies +ĠG ames +F F +Ġacc ident +W ell +ĠMart in +T ER +Ġat h +ĠHe ll +Ġfor g +Ġve ter +ĠMed ic +f ree +Ġst ars +Ġexp ensive +Ġac ad +ra wn +ĠW he +Ġl ock +Ġform at +Ġsold iers +s m +Ġag ent +Ġrespons ibility +or a +ĠS cience +Ġrap id +Ġt ough +ĠJes us +Ġbelie ves +M L +Ġwe ar +le te +Ãĥ ÃĤ +ĠD ri +Ġcomm ission +ĠB ob +O h +ap ed +Ġwar m +ÃĥÃĤ ÃĥÃĤ +Ġ200 3 +ort ion +Ġhas n +ust er +Ġun ivers +ĠI ll +Ġk ing +olog ies +9 4 +ĠT em +ĠM os +Ġpat ient +ĠMex ico +ce an +ĠDe ath +ĠSand ers +y ou +ĠC ast +ĠComp any +pt y +Ġhappen ing +F P +ĠB attle +Ġb ought +A m +M od +U s +ut ers +ĠC re +ĠTh ose +Ġ4 4 +is er +Ġs oul +ĠT op +ĠHar ry +ĠA w +Ġse at +ff ee +Ġrev olution +Ġ( " +ĠD uring +et te +Ġr ing +Ġoff ensive +Ġreturn s +Ġv ideos +Ġdis cl +Ġfam ous +en ced +ĠS ign +ĠR iver +Ġ3 00 +P M +ĠB us +ĠC H +Ġcandid ates +ard en +Ġpercent age +Ġvis ual +Ġthan k +Ġtrou ble +ner gy +Ġ200 1 +Ġpro ve +ash ion +Ġen h +ĠL ong +U M +Ġconnect ed +Ġposs ibility +O ver +Ġexper t +Ġl ibrary +art s +ĠDirect or +Ġfell ow +9 2 +ir ty +Ġd ry +Ġsign s +ĠL ove +Ġqu iet +f oot +Ġp ure +ĠH un +Ġf illed +ph as +ĠE lect +end ment +ĠEx pl +Ġun able +n s +m o +Ġv ast +ob e +Ġident ify +app ing +ĠCarol ina +g ress +Ġpro te +Ġf ish +Ġcircumst ances +raz y +ĠPh ot +Ġb odies +ĠM ur +Ġdevelop ing +ĠA R +Ġexperien ced +Ġsubst ant +ĠBo ard +es ome +Ġdom estic +Ġcomb ined +ĠP ut +Ġchem ical +ĠCh ild +Ġpo ol +ĠC y +Ġe gg +c ons +st ers +Ġh urt +Ġmark ets +Ġconserv ative +Ġsupp orters +Ġag encies +id el +O b +ur b +Ġ4 3 +ĠDef ense +y e +ĠA p +du le +Ġtemper ature +Ġconduct ed +ĠCh ief +Ġpull ed +Ġf ol +L ast +ont o +os is 
+V ER +D es +ĠP an +F irst +Ġadv ance +Ġlic ense +r ors +ĠJ on +Ġimag ine +Ġhe ll +Ġf ixed +Ġinc or +os ite +ĠL og +ick en +] : +Ġsurpr ise +h ab +Ġc raft +ol t +ĠJ ul +Ġd ial +Ġrele vant +Ġent ered +Ġlead s +ĠA D +ĠCle an +Ġpict ures +ess or +Ġal t +Ġpay ing +P er +ĠMark et +Ġupd ates +am ily +ĠT ype +ĠH ome +Ġ5 5 +semb ly +rom e +8 3 +Ġgreat est +Ġhe ight +Ġhe av +ain ts +Ġlist en +as er +ĠS H +Ġcap able +ac le +Ġpers pect +in ating +Ġoff ering +ry pt +ĠDe velop +ab in +r c +Ġbr ight +al ty +ar row +Ġsupp l +ind ing +ack ed +gy pt +ĠAn other +p g +ĠVirgin ia +ĠL u +Ġpl anned +Ġp it +Ġswe et +T ype +ĠD i +Ġtyp ically +ĠFranc isco +Ġpro spect +ĠD an +Ġte en +re es +Ġsc hed +Ġh ol +Ġsc r +Ġlot s +l ife +Ġnews p +Ġfor get +ĠN one +ĠM iddle +ĠR yan +ed d +Ġse vere +Ġsu it +ll er +9 3 +Ġcor respond +Ġexpl os +u ations +Ġfl ag +g ame +r id +Ġpr in +ĠD ata +Ġde ploy +ĠEn ter +su it +gh an +ĠM en +Ġthough ts +Ġmat ters +Ġad apt +ĠA ri +Ġf ill +Ġfor th +Ġs am +Ġ4 1 +Ġpay ment +ĠH or +Ġsp ring +du c +Ġl osing +Ġbring ing +F O +al a +Ġdist ribution +he red +b our +ĠIsrael i +om a +Ġcomb ination +Ġpl enty +V E +C an +ĠH aw +Ġper man +ĠSpe cial +Ġto w +Ġsee king +Ġexam ples +Ġclass es +c r +Ġbe er +Ġmov es +ĠI P +ĠK n +Ġpan el +E ven +Ġproper ly +Ġr is +Ġpl ug +Ġestim ated +E very +Ġdef ensive +ag raph +Ġpre gn +Ġinst it +ĠV ict +Ġvol ume +Ġpos itions +Ġl inks +ĠPro gram +ĠWe ek +ag ues +Ġtrans form +k er +ĠC EO +Ġc as +Ġopp onent +Ġtwe et +ĠC ode +Ġsh op +Ġf ly +Ġtal ks +Ġb ag +Ph one +Ġa id +Ġpl ants +Ġ6 5 +Ġatt orney +ar ters +qu est +ĠMag ic +Ġbeg ins +Ġmy ster +Ġenvironment al +Ġst orage +N N +Ġm arg +Ġs ke +Ġmet al +ell y +Ġord ered +Ġrem ained +Ġl oved +Ġprom pt +Ġupd ated +Ġexper ts +Ġwalk ing +Ġan cient +Ġperform ed +AT E +Ġne ither +i ency +Ġmanufact ure +ĠP ak +Ġselect ed +Ġm ine +Ġult imately +Ġexpl an +Ġlab el +ĠServ ices +ribut ed +Tr ump +Ġsy n +ĠU lt +S C +Ġme at +Ġg iant +ĠW ars +ĠO N +Ġad m +Ġinter pret +Ġeven ing +Ġev il +ĠB oston +ĠW ild +Ġ à +ĠBit coin +ĠAm azon +D r +ĠIn formation +Ġobvious ly +Ġadv anced +Ph oto +ol ar +Ġwe ather +Ġsymb ol +Ġso le +Ġpot entially +ost er +Ġorig inally +m un +3 00 +az e +ess ions +Ġde ck +Ġst ood +Ġyou th +ĠB ern +R ep +ĠT est +Ġbas ically +ot ic +Ġinvol ve +ol it +ly n +S ee +Ġair craft +Ġconf irm +E W +Ġmess ages +ĠRich ard +Ġk it +Ġpro hib +Ġv ulner +is ters +Ġexist ence +Ġturn ing +ĠS P +Ġdes ire +Ġfl at +Ġm ent +se ason +ang es +Ġneighbor hood +ĠL ake +AT ION +Ġpoint ed +b ur +Ġinn ov +uc ks +U L +Ġprofess or +Ġexp ressed +A B +ic ious +Ġ200 2 +ĠDe v +Ġs ession +Ġb are +s en +Ġdis s +ĠC ath +ĠP ass +ĠP oint +Ġdo ctor +or row +ail ed +ĠR ub +ĠD C +ĠChar l +p erson +Ġwrit er +igh ters +ure au +Ġob lig +Ġrecord ed +Ġbro ke +Ġord ers +il ty +Ġmot ion +in ity +l aw +ad ium +Ġimm igration +Ġcontr ast +Ġb att +Ġex cellent +Ġtechn ical +am i +Ġt un +Ġcl oud +ĠY ear +ge on +Ġcre ation +Ġstr ange +Ġa uth +Ġfor t +b orn +Ġext ent +ĠT oday +ĠCl ub +Ġr ain +Ġs ample +Ġaccept ed +Ġt act +Ġf ired +ĠS on +Ġstand s +Ġb oot +Ġ4 7 +Ġstat ements +Ġvers ions +Ġse lling +ound ed +Ġ199 0 +Ġwere n +ĠW atch +Ġexper iment +P ost +Ġret ail +ul ed +In st +un te +ãĥ ¼ +Ġdep art +Ġb ond +i very +om pl +Ġre action +ĠSyri an +ĠP ac +app ed +ani el +D P +Ġres olution +Ġre act +Ġappro ved +on om +m ond +ĠO ffic +-- - +Ġrepl ace +Ġt ack +Ġsp ort +Ġch ain +Ġemer gency +r ad +ĠPalest in +Ġ4 6 +Ġautom atically +Ġrout e +Ġp al +Ġb anks +ĠPar is +ĠMed ia +ro ad +ic ing +i xt +ist ed +Ġg rew +Ġco ord +ĠW here +om in +Ġsub s +� � +Ġ ± +Ġcorpor ate +Ġse lection +n oon +ĠRep ort +c s 
+clud ing +ord ers +anc he +ĠIt s +Ġslow ly +ĠE gypt +ĠA cc +Ġcol le +iqu es +E X +Ġattempt s +ur l +ĠC ross +Ġfind ings +ĠS C +ĠO R +Ġind ex +ens ity +ĠW ay +ĠL and +Ġsh ock +d is +Ġd ynam +Ġc art +m osp +S ince +i est +ĠB oy +Ġst orm +ĠCont in +201 3 +he w +il it +Ġess ential +iqu id +O ther +ive red +Ġreason able +A ct +Ġsub sequ +ĠP ack +ĠF ort +Ġconsider ing +Ġun iversity +l og +Ġmar ried +Ġill ust +ĠTr ue +£ ı +Ġnumer ous +rast ructure +Ġserious ly +Ġrefer red +u a +Ġconsist ent +on na +ĠRe al +ru ption +ci ples +Ġfact s +9 1 +ot es +er g +The n +Ġacc ompl +N ote +Ġre venue +Ġpass ing +Ġm al +e en +ĠY et +Ġg ather +ter day +ew ork +ĠA uthor +P e +Ġopt im +Ġr ub +Ġè £ı +Ġun known +st one +Ġun ion +ol ve +Ġopportun ities +Ġbrow ser +ĠW al +ĠC ost +Ġreport ing +st s +p et +Ġs and +Ġsudden ly +Ġsurpr ising +ĠV R +Ġsomew hat +ĠB as +ult ure +iz z +ĠC D +Ġchalleng es +Ġsett ings +Ġexperien ces +ĠF ull +Ġcan n +Ġrece iving +ES T +Ġj oint +Ġcult ural +Ġa st +8 2 +as tern +ce ived +ĠC ru +Ġb ull +p ired +am m +Ġfac ing +p ower +Ġb oss +ĠH ol +Ġinst r +Ġincreasing ly +Ġsh ift +Ġstre ets +ĠWilliam s +ab b +Ġl ie +Ġl augh +ĠC a +P L +Ġadult s +Ġcustom er +Ġob tained +Ġsupport ing +ht ml +f ire +Ġdetail ed +Ġpick ed +ĠR ight +ld er +E E +st ood +ĠK im +Ġw ire +Ġs ight +Ġdevelop ers +Ġpers ons +Ġs ad +Ġc up +Ġwar ning +Ġboy s +l ong +Ġb ird +f o +Ġw al +Ġobserv ed +Ġz one +iven ess +Ġch annel +c ript +Ġref used +ĠAg ain +Ġsu c +Ġspokes man +ĠRe f +r ite +ou ston +ãĥ ³ +ĠS her +Ġact s +ĠN ame +Ġstrugg le +ar ry +omet imes +Ġdisc rim +H T +Ġcateg ory +Ġreal ize +Ġemploy ee +ĠAf ghan +en ger +Ġgun s +ĠSte ve +ĠM ot +ĠO l +ok ed +Ġth ick +Ġfair ly +ill y +Ġsur ve +ĠM at +we ight +â Ķ +Ġtro ops +Ġag ents +Ġbatter y +Ġmot iv +à ¡ +S ec +d en +o very +L S +Ġfl u +Ġconf ident +ĠO per +Ġem pty +Ġp hen +Ġse ctor +Ġexc ited +Ġrem ote +ap h +o en +Ġdestroy ed +Ġmor al +ĠH P +ĠR on +Ġd ress +ĠB at +Ġl it +ĠM S +Ġa f +H L +r um +is ms +Ġshould n +Ġsym pt +ĠTor onto +het ic +Ġcar bon +Ġinstall ed +Ġviol ent +Ġsol ar +j a +Ġpract ices +Ġr ide +ĠP enn +Ġimpro ved +Ġaud io +Ġbehav i +ĠP S +Ġe ating +D ata +ĠRe view +p ass +cl aim +u ated +ang ers +c hen +Ġproper ties +Ġany where +An other +Ġbl ow +ĠJack son +Ġp roud +Ġplan e +l ines +Ġsqu are +Ġpro of +ans as +Ġtalk ed +m akers +Ġs ister +Ġhold s +Ġres ident +Ġ= = +Ġresist ance +Ġspl it +Ġpro secut +Ġconf idence +res ents +Ġcut s +Ġexcept ion +Ġz ero +Get ty +Ġcop yright +Ġtot ally +orm al +ific ations +ĠAustral ian +Ġs ick +Ġ1 50 +Ġhouse hold +Ġfe es +Ġdri vers +og en +ĠN Y +Ġnecess arily +Ġregul ations +ear ing +s l +Ġperspect ive +c are +ic ial +H is +Ġesc ape +Ġsurpr ised +ĠV an +ur rent +Ġv ac +8 1 +ĠTh us +Ġem phas +ĠCh ampions +ĠI ce +Ġn arr +Ġhead s +Ġca using +b el +f ortunately +ĠM a +Ġtarg ets +ci pl +Ġafter noon +Ġadd s +ĠMay be +ĠF our +ess ed +ple te +Ġus ual +ch o +ing u +Ġwith d +ĠE nergy +ĠE conom +O O +Ġart icles +Ġinj ured +Ġman age +Ġexpl ains +Ġdi agn +R ec +at ures +Ġlink ed +Ġdiscuss ed +Ġexpl o +Ġocc asion +ath an +Ġopp osite +Ġfac es +Ġden ied +ĠK night +Ġn ut +Ġapprox imately +Ġdisapp oint +onym ous +ĠB est +ĠL o +ĠH y +ĠA ff +Ġvot ing +an while +ĠII I +Ġinstit utions +ag ram +ĠD aily +Ġdr ag +Ġnear by +Ġgu ilty +Ġcon ver +P re +s hip +Ġre ward +Ġphilos oph +ĠS S +u gh +Ġapp s +f riend +Ġu pper +Ġad vert +Ġs now +Ġfr ust +Ġour selves +F r +ĠD ie +amp ion +Ġdis miss +Ġc ere +Ġsign al +f rom +Ġ ). 
+Ġ5 2 +Ġcr imes +it ors +est ival +use um +Ġcoun cil +ĠS aud +M ay +ĠG un +ic ian +et her +Ġsu fficient +ĠH en +so le +Ġhistor ical +ĠF ar +ĠT urn +Ġp in +Ġsuc ceed +m at +ly mp +Ġtrad ition +ĠO k +Ġc ro +Ġdesc ription +al le +Ġsk y +T e +Ġwide ly +Ġw ave +Ġdefin ition +ĠJew s +Ġcy cle +Ġref ere +Ġbr ings +us al +Ġal ive +Ġfrequ ently +Ġint ention +ĠCont rol +l v +y stem +Ġpriv acy +g ent +ren ce +ĠQu est +ĠChrist mas +Ġr ail +Ġco oper +Ġtest ed +ĠC apt +as ks +Ġcomfort able +Ġdel ivered +sc ape +Ġdep th +ĠG OP +Ġwrit es +Ġass ets +Ġsa v +im ents +Ġtrans ition +Ġart ist +ĠL ook +Ġl ob +Ġcomp onents +ar ity +Ġwalk ed +Ġro ot +Ġparticip ants +Ġnot iced +Ġres c +Ġn av +ĠAd minist +d a +ut ral +pl ate +Ġimport ance +Ġass ert +ious ly +c ription +Ġinj uries +ĠChe ck +Ġregist ered +Ġint ent +Ġmiss ed +ograph ic +Ġsent ence +oun ter +Ġassist ance +ev in +Ġdat abase +Ġbuild ings +Ġclass ic +Ġth inks +ĠOh io +P r +ug g +Ġfe e +p an +Ġeffect ively +Ġfac ility +Ġbe ar +Ġch apter +Ġdog s +ĠCol umb +Ġl atter +it ial +Ġad mitted +T V +ĠGe org +Ġpost s +\ \ +Ġlawy er +Ġequ ival +Ġm and +Ġcontro lled +ĠW alk +ĠAnd rew +Ġmen u +am ental +Ġprotect ed +v a +Ġadminist r +or al +Ġre in +ĠS ar +Ġamount s +Ġn ative +ĠM oon +Ġrep resents +Ġab andon +Ġcarry ing +Ġt ank +m ary +Ġdecl ared +T ube +Ġh at +Ġpun ish +el lect +m es +Ġun iverse +ĠR od +ph y +Ġinf rastructure +Ġ5 1 +Ġopp osed +ow nt +c a +ĠM ake +Ġhard ware +Ġco ffee +R el +b al +w orld +ĠS af +ĠSe a +in als +Ġown ed +Ġh all +ers ion +Ġdescrib e +ĠP ot +Ġport ion +Ġat mosp +Ġgovern ments +Ġdep ending +Ġoff ense +Ġtr ick +aw a +ĠL ine +ĠV is +ĠH ard +ĠOr ig +ĠCl ick +Ġdes k +ĠVal ley +ĠS ov +Ġmov ies +Ġrem ark +Ġm ail +Ġcons cious +Ġrul ing +ĠR ights +Ġmed ic +he nt +ĠW omen +> < +Ġrepl aced +ĠP rem +ĠTh anks +Ġre new +ĠB all +if orm +Ġsh ots +C omm +Ġar med +Ġconst ant +Ġt aste +Ġreal ized +Ġbu ff +Ġm o +Ġeffic ient +M ost +or ation +if ies +Ġcommun ication +Ġfl ood +Ġconsequ ences +Ġany way +ig g +ĠG M +ĠTh ank +Ġ iron +Ġev olution +ĠC op +tw itter +Ġ9 5 +Ġrelationship s +ad el +ĠYou ng +Ġpropos al +ay ers +uild ing +ĠH ot +OR E +c os +Ġcoll abor +P G +ax y +Ġknow ing +Ġsupport s +ow ed +Ġcontrol s +Ġmere ly +um er +Ġath let +Ġf ashion +p ath +Ġg ift +Ġer a +AN D +Ġkind s +ĠKore an +Ġleg it +ul ous +Ġess entially +Ġthe rap +n ic +Ġsuff ered +Ġh ur +Ġprom ise +Ġex cess +Ġover w +Ġpr ime +ĠH ouston +er ry +ĠM s +R S +201 2 +Ġst ores +ĠO lymp +Ġj ourney +Al though +S ub +ĠE duc +ĠCh apter +Ġrequest s +Ġconsum ers +Ġt iny +Ġis ol +ĠF air +b a +ĠY OU +Ġcr ash +ce ler +Ġemot ional +Ġgood s +Ġelect ed +Ġmod er +ĠLin ux +Ġbl ocks +Ġis land +ĠSoc iety +Ġelect ions +Ġbroad cast +Ġche ap +Ġn ations +Ġse asons +4 00 +Ġwas te +ĠS at +Ġfield s +em ploy +Ġprof ile +Ġauth ors +AL L +ĠG ra +w est +ĠT y +Ġdeath s +Ġv acc +Ġfor med +Ġd u +Ġon going +ĠMuslim s +el f +ig ure +Ġass ume +ĠUkrain e +w ater +Ġco ast +Ġvot ed +g or +ĠA S +ĠMich igan +az a +ĠAr m +i ro +Ġf lex +as ters +' ' +Ġwel come +ar l +Ġloc ations +ig ation +ĠF il +Ġbu ying +Ġarch itect +Ġhard er +ĠC ub +Ġinter face +Ġrestaur ant +Ġdisco ver +Ġex ceed +Ġfav our +ger y +Ġd uty +Ġp itch +ad or +ĠM ach +b oy +Ġrespond ed +Ġext ended +her s +M any +ra id +if er +ĠIn s +S er +Ġmed ium +s he +ĠS ports +Ġmag azine +ut ation +Ġlim its +ĠG all +Ġex ternal +raz il +Ġyoung er +t le +Ġrem ind +ĠC ON +Ġimmedi ate +Ġh idden +Ġvol unte +Ġsim pl +od cast +Ġph ase +d r +Ġpl ot +Ġexp osure +R I +og rap +v in +an ish +ĠAc ad +ĠEng ine +Ġexp ansion +ĠP ay +Y our +Ġpus hed +ĠE ll +ĠHe ad +Ġmarket ing +ĠA C +k et +Ġh its +Ġg ro 
+ĠA ge +ĠSc ot +] [ +Ġst im +Ġi Phone +Ī Ĵ +Ġn arrow +ĠGet ty +ĠTur key +Ġperfect ly +Ġen able +ut ch +Ġprec ise +Ġreg ime +Ġsh if +Ġcomp ens +g un +d iv +Ġch osen +ĠK en +An y +Ġtre es +Ġrecomm ended +ĠR en +u able +ĠH T +F ollow +E G +ĠH and +ĠK enn +Ġarg uments +Ġex ists +Ġb ike +ĠCons erv +Ġbre aking +ĠG ar +Ġc razy +Ġvirt ual +ay lor +ix el +Ġ19 80 +Ġper mission +ĠSer ies +Ġconsum er +Ġclose ly +c alled +Ġ5 4 +Ġhop es +Ġar ray +ĠW in +ĠLab our +Ġsp ons +ĠI re +Ġp ow +Ġread ers +Ġemploy ment +Ġcreat ure +Ġresult ing +Ġaccur ate +Ġmom ents +Ġarg ued +Ġp ed +D uring +Ġ5 3 +ĠT al +Ġs ought +Ġsuff ering +Ġ icon +le e +Ġ( $ +al ian + ° +Ġp ra +Ġbon us +( " +k o +Ġact ing +D E +f all +Ġcompar ison +Ġsm ooth +ĠN AS +u pp +ĠJose ph +ep ing +ĠT ake +ĠM id +Ġs ending +f ast +ĠF all +Ġdeal ing +us er +ĠOr gan +C o +Ġatt ached +Ġse es +% . +Ġtyp ical +AR T +Ġfind s +ĠAs ia +um in +ĠC ore +ĠE nt +in ent +u ce +ĠBl ood +ĠN ever +Ġem ails +Ġhigh light +Ġconf ront +at us +ut ed +Ġun us +Ġtop ic +ĠAd am +Ġb le +at i +Ġunder stood +S et +st ruct +T P +Ġm ob +a a +ĠSt art +pect ed +se ll +Ġded icated +ĠC A +u an +Ġsong s +esc ription +Ġte ch +Ġr ape +Ġas ide +Ġgr ant +Ġ5 6 +s ub +Ġarg ue +Ġcont aining +Ġsche dule +Ġliber al +Ġpublic ly +Ġheav ily +ĠU t +in er +ĠS ection +ĠC are +we et +l s +D is +âĶ Ģ +ĠF ollow +B ack +ĠI T +Ġb es +j i +ĠH it +est ed +Ġevery body +ĠSw ed +Ġfem in +Ġfac ilities +Ġcon ven +C omp +ĠO S +c ore +Ġan x +Ġdiv ision +ĠC am +ĠSt an +m ates +Ġexpl ore +pl om +Ġsh ares +pl oad +an es +Ġide al +et ers +ĠB ase +Ġpl astic +Ġdist inct +ĠNet work +ĠSe attle +Ġtrad ing +ens us +int end +Ġex hib +Ġinit ially +ĠF ood +Ġthous and +ĠBus iness +act er +Ġpar agraph +Ġrough ly +Ġw ww +Ġcreat ive +ĠCon f +Ġconsum ption +Ġfil ms +ag an +Ġob tain +Ġt all +Ġt or +Ġacknow led +Ġg rown +al o +K E +Ġ4 00 +end ers +t aining +U G +Ġsu icide +Ġwat ched +ĠL ist +al i +re hens +Ġsurround ing +Ġp ip +Ġf lying +ĠJ ava +ord an +Ġserv ing +in ations +p ost +Ġsh o +A v +Ġj ail +z y +Ġ199 9 +Ġ< / +Ġliter ally +ĠS ir +Ġexp osed +Ġl ies +st ar +Ġb at +Ġear ned +ĠD ig +Ġspec ified +ĠSe ason +Ġdeg rees +Don ald +Ġcent re +Ġsh aring +Ġwin ter +ĠC O +C he +Ġ Î +M P +Ġun w +Ġfew er +ĠM ir +Ġsomew here +ĠK ey +Ġattack ed +ĠK ir +Ġdom ain +Ġstrong er +Ġ9 9 +Ġpen alty +I d +Sc ript +Ġdecl ined +Ġne ck +Ġfra ud +Ġcur rency +Ġr ising +R C +âĢ¦ âĢ¦ +H z +Ġt ab +Ġtal ent +n am +ĠN BA +Ġvill age +Ġleg s +ĠN ext +E d +Ġac id +Ġhy d +8 00 +Ġinvol ving +ĠIm age +ĠBe fore +F l +Ġyes terday +S ource +Ġterror ist +Ġsu p +Ġsy nt +ĠSaud i +Ġw est +Ġr u +b urg +Ġvis ible +Ġstru ck +r ison +Ġaw esome +Ġd rawn +Ġansw ers +ĠG irl +ĠR am +Ġthreat s +Ġdef eat +os it +Ġv ent +atur ally +Americ an +end a +ĠH oly +Ġr um +% , +c ase +ĠHist ory +ĠYou Tube +Ġsit uations +ĠD NA +S te +Ġsa ved +It em +Ġrec ip +olog ist +Ġfac ed +Ġel ig +O nce +ĠL i +u h +Ġmist ake +ĠDiv ision +ĠB ell +Ġsympt oms + ® +Ġdom in +Ġfall ing +Ġend ing +as hes +Ġmat ches +ĠOn line +Ġexplan ation +D ef +red it +Ġany more +ĠT otal +ĠF OR +us hed +Ġlet ters +Ġris ks +ĠO K +Ġreported ly +: \ +Ġpl ate +Ġsubject s +Ġattempt ed +if ier +ian a +Ġunlike ly +ĠTh ough +um a +ĠIn vest +ĠPr in +ic an +ĠD ar +ĠColor ado +au g +Ġve get +a os +ri a +Ġshe l +Ġmark ed +Ġ( ) +Ġsp r +p o +ĠL ink +Ġdef e +ĠJ r +Ġthem e +Ġpass ion +ĠP en +Ġinf o +iz er +Ġsh it +ĠC ivil +ap se +c re +Ġpo ly +Ġcomp onent +ĠChar les +ĠIre land +ĠPro v +Ġdo ctors +Ġgr anted +Ġpain t +Ġhon or +Ġsm oke +Ġpay ments +Ġprim arily +ĠKing dom +r ich +ate ll +Ġde als +Ġsched uled +Ġfund amental +Ġprote in +Ġnewsp aper 
+Ġcl ients +yth on +ĠD ate +h us +Ġfeed back +Ġstret ch +Ġc ock +Ġhot el +ĠQue en +Ġsu gar +Ġj u +Ġmil k +Ġappro val +ĠL ive +Ġequival ent +ef ully +Ġins ert +z ona +Ġext ension +d ri +J ohn +Ġacc omp +S m +ĠF und +Ġconst antly +Ġ` ` +Ġgener ated +ĠA ction +ĠP sych +ĠT ri +Ġrecogn ize +Ġv ary +ph a +ĠR a +d f +et ch +ĠSov iet +Tw o +Ġpattern s +Ġprof ession +an ing +T ime +ĠL im +Ġcol ors +ĠA z +ĠT R +Ġinf ect +Ġphen omen +Ġshe ll +Al so +Ġput s +Ġdel ivery +Ġbro wn +Ġprocess ing +Ġlight s +ess age +ĠBro ok +ĠA ud +l ation +Ġindust rial +L ike +ĠB razil +rou s +ES S +ĠL uc +Ġsome how +Ġ8 5 +Ġpro port +Ġpolit icians +Ġindic ate +Ġh ole +Ġtechn iques +Ġcompet itive +Ġph r +Ġv o +ist ent +ĠD ream +Ġcamp us +Ġaspect s +Ġhelp ful +Ġsh ield +or se +Ġtrig ger +m al +Ġ5 8 +Ġt ort +Ġperson ally +Ġt ag +Ġkeep s +ĠV ideo +Ġben ch +Ġg ap +a ire +Ġe ast +Ġrec overy +per ial +Ġprof it +ĠM ic +Ġ5 7 +Ġcol on +Ġstrong ly +st yle +Ġalleg ations +h an +Ġrep orters +j o +r ine +arg et +and al +Ġ0 3 +Ġfl ash +tr ans +Ġstr ict +Ġpark ing +ĠPak istan +Ġl i +Ġwe ird +ĠE ric +Ġreg ions +ĠJ un +Ġint ellect +ĠW H +od ing +rib utes +up id +ĠT it +Ġf inger +or ia +Ġe lev +ĠF ield +Ġcon clusion +; ; +Ġfeel ings +Ġext ensive +Ġm ixed +Ġne uro +v y +Ġhar ass +ĠC irc +ou ch +Ġterrit ory +Ġsuccess fully +M ar +Ġing red +Ġoverw hel +Ġl ayer +V iew +Ġall ies +ill ance +ĠTh ree +Ġb unch +Ġnorm ally +Ġnet works +Ġsac r +ĠC IA +b les +Ġch ose +Ġopp onents +Ġregard less +Ġfr anch +Ġpre f +ĠP o +Ġbr idge +ann a +ĠSil ver +Ġw age +p age +ri or +Ġrad ical +ĠL ittle +Ġman ip +Ġsecret ary +Ġg ang +D R +F A +Ġdec ent +ĠSp irit +Ġun cle +ĠDevelop ment +Ġinvest ors +Ġwall s +Ġpub lish +Ġgener ate +iss ions +c ar +Ġprom ote +Ġcut ting +Ġche st +Ġdrink ing +Ġcollect ed +Ġ7 2 +Ġhop ing +Ġem br +gor ith +Ġwar ned +Ġinstruct ions +O G +ĠD id +ĠAg ency +Ġg ear +Ġcritic ism +ĠF urther +Ġut il +ann y +R ed +Ġcoun sel +ĠAs ian +Ġredu ction +p ool +Ġteach ing +Ġdeep ly +i y +Ġestim ates +Ġcho ices +Ġperman ent +in em +ke l +Ġf asc +p se +f ile +ĠL ow +ĠP erson +Ġt ournament +st al +Ġm el +U ST +ĠR ay +az i +V al +Ġcont ained +ĠH olly +Ġw ake +Ġreve al +Ġprocess es +ĠIS IS +Ġ0 9 +Ġbl ind +Ġste el +ĠB ad +Ġcare fully +app y +ro it +Ġg aming +Ġhous es +ĠC oll +Ġtr uck +er m +Ġsc ored +Ġocc as +ret urn +b ound +v ar +Ġsh arp +Ġaf raid +ĠE X +am ber +c ific +Ġsche me +N C +ĠPol it +Ġdecl ine +Ġ199 8 +Ġpus hing +Ġposs ession +Ġpriv ile +Ġteacher s +Ġy ield +H A +ĠDav is +it led +#### #### +Ġr ig +ĠD aniel +ac on +Ġh ide +ut en +Ġcolle agues +Ġprin ciples +Ġl oud +Ġs in +ĠDem on +Ġst one +Ġ0 2 +Ġt aught +Ġter rible +Ġst uck +ĠPol icy +te en +Ġimplement ation +ĠB BC +ĠAP I +Ġwhe el +all as +Ġch ampions +ol ars +play er +Ġrepeated ly +ĠSt ill +Ġlik es +ast y +es ter +ĠCath olic +R L +Ġb ath +Ġno ise +t itle +Ġn orthern +P art +Ġmag n +Ġf ab +ĠAs h +Ġdis pl +Ġtick et +Ġm urd +Ġalong side +ĠMus ic +Ġr iver +ĠSte el +ĠC L +ĠPl ayer +ĠM ult +ow ing +re p +s ize +Ġt ur +ĠGeorg ia +isc al +ra ction +Ġc able +Ġ5 9 +Ġw ins +Ġup coming +Ġsurv ive +Ġins pired +ĠEduc ation +Ġstat istics +ĠF oot +iam i +Ġy ellow +ĠP age +. 
- +ĠH as +Ġur ban +Ġa x +es sel +\ " +Ġquarter back +Ġreg ister +ĠLab or +Ġab ilities +ĠF amily +Ġvar iable +ĠPr ice +Ġcont em +Ġth in +ĠE qu +d ata +Ġg otten +Ġconst it +Ġas ks +Ġt ail +Ġexc iting +ĠE ffect +ĠSp anish +Ġencour age +ins on +ĠA h +Ġcommit ment +C S +Ġr ally +Ġ: : +Ġsubs id +Ġsp in +Ġcapt ured +201 8 +Ġinn oc +Ġalleged ly +ĠC ome +Ġart ists +ĠN umber +Ġelect ronic +Ġreg ional +ap es +Ġw ra +Ġmy th +pr ise +ĠM iller +ĠC reat +ĠEp isode +b ell +Ġdirect ed +Ġext ract +Ġs orry +Ġv ice +ag ger +ĠSu pport +Ġ6 6 +ĠI ron +Ġwonder ful +Ġg ra +N et +ion e +E ng +Ġsh ips +ik es +ĠK evin +it ar +Ġactiv ists +tr ue +ĠAri zona +ent h +ĠDes pite +ĠS E +Ġha bit +ern el +Ġin qu +Ġab ortion +Ġv oid +Ġexpl icit +Ġeng aged +Ġang ry +Ġr ating +Ġfr ag +b ro +ick ing +d ev +Ġwor ried +Ġob ser +Ġap artment +ĠG T +Ġest ate +ĠConst itution +em on +ĠS now +Ġcount y +Ġdis ag +ĠStep hen +Ġimm igrants +w ind +ĠN ations +Ġfol ks +O ut +Ġg all +Ġtarget ed +Ġst ead +ĠB on +ĠL ib +Ġinform ed +Ġ12 0 +ch ain +idel ines +or ough +Ġdri ven +Ġregular ly +Ġbas ket +Ġprinc iple +oc ument +Ġst un +ib ilities +ĠRom an +ĠAb out +Ġal ert +Ġdemocr acy +Ġrepresent ed +H S +c ers +p arent +Ar t +p ack +Ġdi plom +re ts +ĠN O +Ġcapt ure +ĠAd v +Ħ ¢ +Ġannounce ment +ĠL ear +Ġh ook +Ġpur s +ĠS uch +ĠC amer +Ġrefuge es +ĠV e +P ol +Ġrecogn ized +l ib +Ġhad n +A ss +Ġpil ot +us hing +Ġreturn ing +Ġtra il +ĠSt one +Ġrout ine +Ġcour ts +Ġdes per +Ġfriend ly +ĠIt aly +Ġpl ed +Ġbreat h +Ġstud io +N S +Ġimp ressive +ĠAfghan istan +Ġf ing +Ġd ownt +ink ing +ĠR og +i ary +col or +se x +ar on +Ġf ault +ĠN ick +D own +ĠR ose +ĠS outhern +X X +is odes +L ist +6 00 +Ġout come +er r +Ġelse where +Ġret ire +Ġp ounds +ĠGl obal +Pe ople +Ġcommun ications +Ġlo an +Ġrat io +ĠEm pire +Ġg onna +Ġinv ent +D F +Ġ19 70 +ĠComm on +p at +Ġprom ised +Ġd inner +ĠH om +Ġcreat es +Ġoper ate +ver ty +ĠJ ordan +et ime +Ġsust ain +R eg +Ġincred ible +im a +Ġwar rant +Ġm m +A tt +Ġlaw suit +Ġreview s +it ure +ĠS ource +l ights +ĠF ord +Ġ6 3 +g roup +st ore +Ġfeat ured +Ġfore ver +Ġpo verty +ĠP op +ĠC NN +az z +ab is +ach ing +Ġl aid +ĠSu pp +Ġfil ter +en a +ĠCommun ity +Ġcreat ures +u ction +ĠR oyal +Ġassoci ation +ĠCon nect +ĠBr ad +âĸ Ī +l ers +the re +ĠG i +Ġval uable +AC K +ĠT aylor +Ġl iquid +ĠAtt orney +ĠCar l +ĠF inal +ag a +ĠWil son +B ecause +ĠProf essor +ak a +Ġincred ibly +r ance +! ) +R ef +s k +Ġsol utions +Ġatmosp here +Ġbl ame +um es +ĠN ob +C A +um ps +r ical +ĠPut in +ĠD est +or ic +ĠP A +Ġrespect ively +w an +Ġfif th +â Ħ¢ +ĠC ry +Ġgovern or +res ident +Ġpurch ased +Ġh ack +Ġint ense +ob s +Ġorig in +Ġdef ine +Ġcare ful +** * +Ġshould er +Cl ick +Ġt ied +Ġdest ruction +ou red +Ġno body +Ġh o +ĠEx per +Ġt ip +" ; +Ġtechn ique +Ġj ur +ĠP ok +b ow +Ġleg end +Ġacc ord +Ġbus y +ĠInt el +Ġh ang +ak i +. 
] +âĢĶâĢĶ âĢĶâĢĶ +Ġsur gery +Ġrep rodu +Ġun iform +Ġscen es +c ode +Ġ6 2 +l isher +ĠH ave +ph ia +Ġcry pt +Ġrec on +Ġsc ream +Ġadop ted +Ġsc ores +N e +ĠIt alian +in cluding +B O +Ġindic ated +Ġent ertain +G u +T ext +i el +Ġtw enty +Ġeng age +off s +ĠPac ific +Ġsm ile +Ġperson nel +Ġto ler +Ġdo ors +Ġt one +Ġmach ines +Ġent ering +ten ance +C O +ĠJer sey +Ġfore st +Ġhor se +Ġcompl aint +ĠSpr ing +y o +ĠPl us +ed ing +ĠRet urn +qu arters +ial s +c ow +Ġacad emic +Ġf ruit +Ġ199 6 +og ether +Ġw ine +Ġpur su +ĠSte ven +Ġlic ens +Wh o +Ġclot hes +re ction +Ġsqu ad +Ġst able +Ġr aw +z ens +St ar +ut ies +anc er +Ġke ys +ĠM u +Ġcompl icated +ig er +ĠTe xt +Ġabs or +Ġ6 8 +Ġfun ny +Ġrel ief +ĠL ew +ĠC ook +Ġch art +Ġdraw ing +G E +Ġmod ule +ĠB ull +I LL +Ġs alt +0000 0000 +il le +Ġres ource +aw ay +adel phia +ĠB ru +Ġ6 7 +Ġsome body +Ġparticip ate +Ġro se +we red +Ġmus cle +Ġcons ent +Ġcontin uing +ĠGuard ian +ĠOr der +reg on +Ġre ar +Ġprov ision +Ġlik ed +ri ent +Ġb ra +Tr ans +Ġmeet ings +Ġto x +Ġcon vent +Ġaut o +Ġrec ording +ĠSo ft +00 1 +ĠR oll +Ġprogram ming +Ġp ic +Ġprov ed +Ġst ab +ĠA st +Ġca ption +ul ating +ĠAtt ack +Ġnew ly +Ġ199 7 +f r +Ġdis cipl +ĠGree k +Ġed ition +ĠDo es +ĠB ox +if le +ack et +Ġpass es +Ġgu est +Ġac celer +it als +U D +Ġaut hent +ĠR est +ov al +t a +u ine +Ġarm or +ĠT own +Ġcomp at +Ġinc hes +Des pite +Ġass ign +he rent +Ġprep are +ĠM eg +oc key +Ġdep ends +Ġtrack s +w atch +Ġl ists +ĠN orthern +Ġal ter +re c +ĠE astern +Ġcond em +Ġevery where +? ' +Ġaff ili +Ġf ought +": {" +Ġm ac +it arian +Ġsc ope +ĠA L +aw s +ar ms +Ġqu e +Ġenjoy ed +nes ota +Ġagg ressive +ĠSt ory +ĠI V +Ġrec ipe +Ġrare ly +ĠMed ical +val ue +ang el +ay ing +omet hing +Ġsub section +Ġs outhern +Ġfrequ ency +re te +roll ed +ult s +ĠN ic +Ġbeh alf +Ġsequ ence +ab et +Ġcontrovers ial +Ġcomp rom +Ġwork er +Ġmain ly +Ġal gorith +ĠM ajor +or ce +g ender +Ġorgan ized +Ġf ake +Ġconclud ed +ĠE D +ĠEx ec +r age +Ġch ances +ber ry +ĠTr ad +Ġconfig uration +Ġwithd raw +Ġf ro +ud es +ĠBro ther +ĠB rian +Ġtri es +Ġsam ples +Ġb id +ĠGold en +Ġphot ograph +if est +ĠD O +ĠPar liament +******** ******** +R em +Ġcont est +Ġsign ing +p x +ĠZ eal +âĶĢ âĶĢ +E ar +Ġex it +Be fore +ĠCor por +n ull +mon th +Ġrac ial +ott ed +ĠV eg +ĠRe uters +Ġsw ord +ps on +ĠRom ney +a ed +Ġt rib +Ġin ner +Ġprot ocol +ĠB i +ĠM iami +ever al +p ress +Ġsh ipping +ĠAm endment +ĠHow ard +con nect +ĠD isc +ĠJ ac +iam ond +ĠThere fore +s es +ĠPrin cess +ĠUS B +ĠAn th +Ġsurve illance +Ġap olog +Ġ6 1 +ow a +Ġf ulf +j s +Ġl uck +ust ed +Ġ § +n i +Ġant icip +em an +Ġwin ner +Ġsil ver +ll a +ic ity +Ġunus ual +Ġcr ack +Ġt ies +e z +Ġpract ical +Ġprov ince +ĠPl ace +Ġprior ity +IC E +Ġdescrib es +Ġbr anch +F orm +ask a +miss ions +b i +Ġp orn +ĠTur k +Ġent hus +Ġf ighters +Ġ0 8 +ĠDet roit +Ġfound ation +av id +A re +Ġjud gment +cl ing +Ġsol ve +ĠDes ign +W here +hes is +ĠT ro +a fter +Ġne utral +ĠPalestin ian +ĠHolly wood +Ġadv is +ĠN on +y es +ol is +Ġrep utation +Ġsm ell +Ġb read +ĠB ul +ĠBe ach +Ġclaim ing +Ġgen etic +Ġtechn ologies +Ġupgr ade +row s +Ġdevelop er +ĠJ osh +ĠDis ney +erv ed +ip al +Ġun ex +Ġbare ly +t hen +ĠP ub +Ġill ness +et ary +ĠB al +Ġp atch +Ġbut t +Ġst upid +ĠD og +ĠD allas +f ront +ie ce +Ġprot ests +Ġch at +oen ix +Ġw ing +Ġpar liament +Ġ7 7 +ose xual +Ġre nder +pt ions +ĠCo ast +os a +ĠG reg +h op +ĠMan agement +Ġbit coin +Ġrec over +Ġincor por +or ne +ĠUs ing +Ġpre ced +Ġthreat ened +Ġspirit ual +ĠE vent +ĠF red +Ġadvert ising +Ġimprove ments +ĠC ustom +Ġer rors +Ġsens itive +ĠN avy +Ġcre am +L ook +Ġex clusive +Ġcomp 
rehens +Ġde leg +Ġcon ce +Ġrem em +Ġstruct ures +Ġst ored +N D +Ġ1 000 +U P +ĠB udd +A F +w oman +ĠAcad emy +ð Ł +se a +Ġtem porary +Ab out +es ters +Ġtick ets +Ġposs ess +in ch +o z +Ġl a +Ġcontract s +Ġun p +Ġc ig +ĠK at +ult ural +as m +Ġmount ain +ĠCapt ain +St ep +m aking +ĠSp ain +Ġequ ally +Ġl ands +at ers +Ġreject ed +er a +im m +ri x +C D +Ġtrans action +g ener +less ly +Ġ| | +Ġc os +ĠHen ry +Ġprov isions +Ġg ained +Ġdirect ory +Ġra ising +ĠS ep +ol en +ond er +Ġcon sole +in st +Ġb om +Ġunc ertain +1 50 +ock ing +Ġmeas ured +Ġpl ain +Ġse ats +Ġd ict +S L +af e +Ġest imate +iz on +at hered +Ġcontribut ed +Ġep isodes +omm od +G r +AN T +Ġ6 9 +G ener +Ġ2 50 +vious ly +rog en +Ġterror ism +Ġmove ments +ent le +oun ce +ĠS oul +Ġpre v +ĠT able +act s +ri ors +t ab +Ġsuff er +Ġn erv +Ġmain stream +ĠW olf +Ġfranch ise +b at +Ġdem ands +Ġag enda +Ġdo zen +Ġclin ical +iz ard +ĠO p +t d +Ġvis ited +ĠPer haps +Ġact or +Ġde lic +Ġcont ribute +Ġin ject +ĠE s +ac co +Ġlist ening +Ġcon gress +epend ent +Ġprem ium +Ġ7 6 +ĠIr ish +Ġass igned +ĠPh ys +Ġworld wide +Ġnarr ative +ot ype +m ont +b ase +ĠB owl +ĠAdminist ration +Ġrel ation +ĠE V +C P +Ġco vers +Ġ7 8 +Ġcert ific +Ġgr ass +Ġ0 4 +pir acy +ir a +Ġengine ering +ĠM ars +Ġun employ +ĠFore ign +st ract +Ġv en +Ġst eal +Ġrepl ied +Ġult imate +Ġtit les +d ated +Ġj oy +a us +Ġhy per +ak u +Ġoffic ially +ĠPro duct +Ġdifficult y +per or +Ġresult ed +rib ed +l ink +wh o +~~ ~~ +ĠSpe ed +ĠV iet +W ind +ĠBar ack +Ġrestrict ions +ĠSh are +Ġ199 5 +ition ally +Ġbeaut y +op t +Ġm aps +ĠC R +ĠN ation +ĠCru z +W ill +Ġelectric ity +Ġor g +Ġb urd +Ġviol ation +Ġus age +Ġper mit +ĠCh ron +ĠF ant +Ġn aturally +Ġ0 7 +Ġth rown +ĠAw oken +Ġal ien +ĠHer o +ĠK ent +ĠR ick +ri ke +Ġp ace +}, {" +G L +Ġpo ison +ĠT ower +Ġform al +al ysis +Ġgen uine +Ġk il +a ver +Ġproced ure +ĠPro p +intend o +ĠM ain +as ant +Ġtr ained +G ame +ĠL oad +ĠM A +Ġcru cial +Ġle ts +ĠF R +Ġch ampion +1 01 +ĠCon ference +Ġwrit ers +Ġconnect ions +Ġo kay +ir ms +ĠR and +Ġenc ounter +ĠB uff +Ġachie ved +Ġche cks +isc ons +Ġassist ant +Ġwhen ever +ĠA ccess +ĠU r +b in +Ġcl ock +is p +op her +Ġb orrow +Ġm ad +Ġperson ality +on ly +IS T +ab ama +Ġg ains +Ġcommon ly +Ġter r +Ġhyp ot +Ġre ly +Ġt iss +iscons in +Ġrid ic +f unction +ĠO regon +Ġun com +r ating +el and +ĠN C +Ġm oon +ann on +Ġvulner able +ut ive +³³ ³³ +ĠRad io +Ġw estern +se ct +ĠT ony +Ġocc urs +ĠO s +ĠH on +Ã Ń +Ġv essel +ĠScot land +Ġdiscrim ination +Ġsubsequ ent +st ring +Ġfant asy +ĠSh adow +Ġtest im +W E +it i +r as +Ġbo at +Ġmar ks +Ġord inary +Ġre n +Ġrepresent ative +Ġpet ition +Ġ7 3 +Ġad venture +Ġign ore +ĠPhil adelphia +ĠS av +V P +Ġfact ory +Ġt asks +Ġdep ression +z ed +................ ................ 
+ĠSt orm +Ġc ogn +Ġelig ible +Ġredu cing +v ia +Ġ0 5 +Ġstri king +Ġdoll ar +h o +O V +Ġinstr ument +Ġphilosoph y +ĠMo ore +ĠA venue +Ġrul ed +ĠFr ont +IN E +ĠM ah +Ġscen ario +ĠNAS A +Ġen orm +Ġdeb ut +Ġte a +T oday +Ġabs ence +S im +Ġh am +le ep +Ġt ables +ĠHe art +M I +K e +re qu +V D +m ap +Ġchair man +Ġp ump +Ġrapid ly +v i +Ġsubstant ial +E P +d es +ch ant +ili pp +ĠS anta +ri ers +anche ster +L oad +ĠC ase +Ġsa ving +Ġ7 4 +ĠA FP +er ning +oun ced +ĠMin nesota +ĠW as +Ġrec ru +Ġassess ment +ĠB ron +U E +Ġdynam ic +Ġf urn +ul ator +Ġprop ag +h igh +Ġacc ommod +Ġst ack +ĠS us +w rit +Ġre ven +ĠGod d +ĠZeal and +ab s +Ġbr ut +Ġper pet +h ot +Ġhard ly +ĠB urn +ãĤ ¹ +Ġst y +Ġtrans actions +Ġg ate +Ġsc reens +Ġsub mitted +Ġ1 01 +Ġlangu ages +ugh t +em en +Ġfall s +Ġc oc +Ĥ ¬ +Ġstri kes +p a +Ġdel iber +ĠI M +Ġrel ax +ann els +ĠSen ator +Ġext rem +Ġ} , +ĠDe b +Ġbe ll +Ġdis order +c ut +Ġi OS +Ġl ocked +Ġem issions +Ġshort ly +" ] +ĠJud ge +ĠS ometimes +Ġr ival +Ġd ust +Ġreach ing +F ile +¯¯ ¯¯ +ino is +ĠJ ason +Ġs atell +are t +Ġst ations +Ġag ric +ĠTechn ology +com es +ĠUn fortunately +ĠChild ren +Ġappl ies +ast ed +Ġan ger +ail ability +ĠDam age +Ġcomp are +ĠStand ard +Ġaim ed +ĠB a +angu age +Ġreg ulation +Ġj ury +Ġair port +Ġse ctions +ĠPr ince +em ed +Ġmedic ine +Ġh itting +Ġsp ark +ol ves +Ġad s +St ate +Ġfood s +Ġrepl acement +Ġch icken +Ġlow est +Ġmind s +Ġinvol ves +u i +Ġarr ang +Ġproced ures +ĠWh ich +ivers ary +Ġb ills +Ġimprove ment +Ġin ev +Ġexpect ations +Ġintellect ual +Ġsp aces +Ġmechan ism +2 50 +bre ak +ĠZ e +ĠT enn +ĠB alt +Ġbar rel +Ġstat ic +man n +Pol ice +Ġt ips +Ġhand ling +c us +od ed +il ton +ir y +Ġjournal ists +our se +Ġcom ic +Ġnom ine +IT Y +Ġvers us +Ġlo op +Ġsur f +ĠInd ust +ĠHun ter +Ġbelief s +is an +Ġset up +Ġbre w +im age +Ġcomput ers +f ol +} ," +ĠMed al +Ġtax p +Ġdisplay ed +Ġg rav +Ġf iscal +M on +ĠMos cow +ĠK ong +ĠCent re +Ġcamer as +ĠMr s +ĠH ay +Ġa ver +ĠK elly +p y +Ġrequire ment +Ġent itled +omb ie +Ġsh adow +ag ic +ĠA k +Ġel ite +Ġdiv ided +Ġhead ing +Ġcop ies +Ġloss es +Ġv it +k ed +ĠB ry +Ġan s +ĠSte am +Ġrep orter +he im +ĠIt em +Ġsuper ior +d on +ere nt +à ¶ +Ġtherap y +Ġpe ak +ĠMod el +Ġl ying +Ġg am +z er +r itten +Ġrespons es +Ġconsider ation +ĠB ible +Ġl oyal +Ġinst ant +Ġp m +ĠFore st +à ¼ +Ġext end +Ġconv icted +Ġfound er +Ġconv in +ĠO ak +che ck +Ġsch olars +p ed +Ġover se +T op +c ount +ĠAr k + · +Ġ0 6 +ĠL A +m d +ĠLat in +im ental +ĠC PU +Ġsubst ance +Ġminor ity +Ġmanufact uring +E r +ocol ate +Ġatt ended +ĠMan ager +r ations +Ġappreci ate +om y +GB T +id ency +B L +Ġguarant ee +pos ition +Ġo cean +clud e +Ġhead ed +Ġt ape +Ġlo ose +Ġlog ic +Ġpro ven +Ġsp ir +Ġad mit +is a +Ġinvestig ate +Ġ199 4 +sy lv +ĠL ost +c est +Ġ7 1 +Ġrequest ed +Ġwind ows +ĠPok é +ĠWith out +M et +Ġbehavi our +Ġread er +Ġh ung +ĠKe ep +Ġro les +Ġimplement ed +Ġbl ank +Ġserv es +ĠJ ay +Ġc ited +ĠF riend +prof it +ap on +Ġrep air +it em +arr ass +Ġcrit ics +ad i +ĠF ather +Ġsh out +Ġf ool +Ġ8 8 +Ġprodu cing +Ġl ib +Ġround s +Ġcirc le +Ġpre par +Ġsub mit +Ġn ic +mor row +ãĥ « +U nder +Ġv ital +ater n +Ġpass word +Ġpublic ation +Ġprom inent +Ġspeak s +Ġb ars +Ġde eper +ĠM ill +port ed +Ġw id +Ġbut ter +Ġsm oking +Ġindic ates +K ey +rop ri +ĠF ile +all ing +ast ing +ĠR us +Ġad j +Ġ7 9 +av al +Ġpres um +bur gh +on ic +Ġf ur +Ġpoll s +ik a +Ġsecond ary +Ġmon ster +ig s +ĠCur rent +E vent +Ġowners hip +end ar +Ġarri ve +ĠT ax +Ġn ull +ĠPri v +Ġth ro +Ġk iss +c at +Ġup set +ang le +it ches +ect or +olog ists +ĠGal axy +Ġcor ruption +Ġh int +ent er +ĠH ospital +Ġgreat 
ly +Ġbeg un +es y +Ġso il +ĠAnt on +Ġmain tenance +ãĥ © +Ġdo zens +Ġhuman ity +ĠAl abama +Ġr om +w orth +ap ing +sylv ania +l ah +Ġg athered +G A +Ġattack ing +f ound +ĠSqu are +Ġar bit +ict ions +ĠW isconsin +Ġd ance +ĠS aint +arch y +Ġbase ball +Ġcontribut ions +Ġliter ature +Ġex ha +per ty +t est +Ġb ab +Ġcontain er +let ter +Ġfall en +Ġwebs ites +Ġbott le +ĠS ac +Ġbre ast +ĠP L +Ġveter an +Ġinterview s +ĠA le +Ġb anned +eng ers +ĠRev olution +in th +Ġconc erning +IV E +Ġexp enses +ĠMatt hew +ĠColumb ia +d s +ist ance +Ġent ity +.. ." +Ġrel iable +Ġpar alle +ĠChrist ians +Ġopin ions +Ġin du +l ow +Ġcompet e +Ġth orough +Ġemploy ed +Ġestablish ment +ig en +ĠC ro +Ġlawy ers +ĠSt ation +T E +ĠL ind +ĠP ur +it ary +Ġeffic iency +âĢ IJ +ĠL y +Ġm ask +Ġdis aster +Ġag es +ER E +es is +ĠH old +Ġcas ual +b led +Ġen abled +ĠEn vironment +ĠInt elligence +i per +ĠM ap +ĠB E +Ġemer ged +is dom +Ġc abin +Ġregist ration +Ġfing ers +Ġro ster +Ġfram ework +ĠDo ctor +et ts +Ġtransport ation +Ġaware ness +H er +Ġattempt ing +O ff +ĠSt ore +ÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤ +ĠK now +Ġdef ence +Ġsc an +ĠT en +ĠCh air +ĠP H +ĠAtl anta +Ġfuck ing +Ġans wered +b n +ĠK ar +Ġcateg ories +Ġr ational +Ġc ust +Ġrob ot +Ġcorrect ly +Ġg if +Ġgraph ics +m ic +Ġground s +ĠO pp +i ate +Ġdist ributed +Ġsan ctions +Ġchalleng ing +ut o +Ġingred ients +Ġinv ited +Ġfound ed +ĠRe qu +d ed +Ġb owl +Ġbrother s +ĠH a +I O +Ġw ages +im ore +oc ial +Ġse ed +ative ly +Ġaddress es +ĠI owa +ab eth +Ġatt itude +is d +ch ild +Ġm ole +Ġdisco very +y ard +B r +Ġ8 2 +Ġsuppl ies +ell ing +Ġdist ingu +C R +Ġre cept +Ġ vert +Ġsw im +b ec +d oor +ĠY eah +Ġg al +Ġinter act +ĠE SP +ĠC S +amp s +Ġconvin ced +Ġobject ive +Ġdis h +ĠPhot os +l ad +Ġdownt own +o il +in ction +Ġto morrow +ĠC OM +Ġsurv ival +sh ot +Ġsett lement +C ons +ĠX box +int erest +ĠS M +arg o +en ess +Ġeth nic +b ered +M in +ĠT ok +Ġinc ent +ĠComm and +Ġmain tained +Ġbreak s +br idge +at ar +ag g +ĠF inally +un icip +ĠO nt +le ft +Ġrecogn ition +Ġ* / +ĠP ers +Ġwe lf +Ġaddress ed +ĠK ansas +Ġvir us +Ġwhere as +Ġp apers +ram s +ĠMin istry +Ġple asure +Ġacqu ired +Ġd uration +j pg +Ġcal m +ĠN HL +Ġburn ing +Ġfold er +ick ed +ĠP y +ĠIll inois +Cl ass +ĠGodd ess +Ġperform ing +Ġwelf are +j ar +In ter +Ġl in +Ġenh ance +Ġnot ion +f are +yp es +ĠAre a +Ġcann abis +ĠDie go +f s +ĠM anchester +com m +in ite +Ġcover ing +ĠS ound +Ġ19 60 +Ġ8 4 +e lect +z ing +Ġcitiz en +Ġph ones +Ġr aid +Ġign ored +ĠOb ject +Ġu pload +c ard +Ġmod ified +Ġroom s +ia h +r ange +he ast +ach us +Ġsuggest ing +âĢ ĭ +gr ade +E l +Ġclot hing +Ġr h +ĠH an +un ity +en cing +ĠAust in +sec ution +t ra +d em +ĠQ ual +Ġhe aven +Ġst ages +Ġw edd +pl us +ific ial +ĠIm m +ĠH o +iet ies +Ġphr ase +Ġbr ill +act ory +Ġprov iders +Ġsil ence +Ġa er +ĠA I +ĠAd venture +Ġplatform s +Ġdemonstr ated +Ġinter f +ing ton +Ġr aces +Ġgr ade +ult ane +ĠTh rough +f alse +Ġb ow +ĠA B +Ġfl avor +Ġhistor ic +g ov +Ġcol our +Ġview ed +ĠEm ail +el come +Ġinter vention +Ġd iversity +Ġperiod s +Ġre verse +ĠV ery +Ġqu ote +ĠLe ft +th rough +Ġsc rew +Ġland ing +Ġp ill +Ġw et +Ġprot esters +Ġrepe at +av ed +er k +Ġsal ary +ĠPenn sylvania +St ill +Ġmay or +Ġkit chen +Ġfeat uring +ĠM useum +ĠT ournament +ĠF al +Ġser vers +U C +Ġany body +im g +ĠTr ade +ixt ure +the less +Ġfin ance +Ġcl osing +ĠPat ri +i ac +ab el +Ġ> > +or ous +Ġf irms +sc reen +un a +Ġemb arrass +ul se +Ġlet ting +Ġth rew +ile y +Ġch annels +l an +ĠVeg as +Ġse ar +Ġfant astic +ar re +uzz le +ĠD er +Th ose +Ġsw ing +Ġshe et +ind ex +co ver +og an +Ġvari ables +ĠTe ch +Ġsp oken +ac hel +ĠD a 
+ĠMount ain +Ġload ed +Ġfoot age +vers ion +Ġun l +ĠPh oenix +Ġthrow ing +Ġf iring +Ġtrack ing +Ġw idth +Ġstrugg ling +ro oms +ot ion +Ġmonth ly +ĠSer ver +Ġegg s +op en +M C +Ġ199 3 +Ġh ired +Ġstay ed +ĠAll en +Ġst ro +Ġ9 8 +st ep +ĠTurk ish +Ġfab ric +ist ing +ĠD om +Ġd ates +Ġpr on +Ġbasket ball +Ġl ucky +ĠArab ia +Ġassum ed +est y +Ġaff airs +Ġgl ad +ĠInd eed +ĠF A +ĠW ord +Ġjo ining +if ice +p read +ir ts +ĠSe lect +Ġpop ulations +aw are +Ġn ose +Ġcompl aints +st art +Ġsc oring +Th anks +Ġmin ing +Ġvisit ors +S H +Ġdam aged +Ġcharacter istics +ĠP ent +D C +Ġ8 3 +ĠS ix +r ates +Ġfl ags +ĠB rew +d og +M ark +// // +Ġexec ution +Ġj oke +ph ones +Ġtestim ony +Ġob st +Q L +ĠC ut +Ġstud ied +ĠN intendo +ick et +ĠN BC +Ġl ad +ĠB ra +ĠM oh +Ġk ernel +Ġoverwhel ming +Ġag ed +Ġapplic able +ĠC ond +Ġroad s +ĠBl ock +m ade +od ge +Ġcomm ands +Ġoff ices +vel and +Ġt ut +Ġrece iver +ĠF ro +Ġsho pping +Ġi P +ĠSt re +ĠA BC +Ġentertain ment +ĠB ow +ort ed +M c +Ġread s +gr ad +ĠCol lect +Ġâ ĪĴ +ĠCap ital +eder ation +Ġemploy er +Ġinvolve ment +Ġanx iety +al ia +Ġro of +ĠAm ong +ĠDemocr at +Ġstat s +ĠV ill +Ġconst itutional +Ġrefer ring +itt y +Ġtack le +out ube +Ġback ed +ĠH ong +ĠBro ad +Ġe le +ĠO tt +Ġ199 2 +h our +achus etts +C al +Ġdefe ated +Ġ8 1 +es p +Ġseem ingly +w as +ĠJ enn +ĠK urd +Ġg ene +Ġdisc ount +R et +EC T +( ); +Ġclub s +Ġs id +ĠM arsh +Che ck +Ġp p +ĠE ag +ides pread +Ġbe ings +F T +Ġintrodu ction +ĠCh ange +AR D +Ġ1 10 +ad ows +ier ce +Ġme al +a uthor +ĠB ang +lah oma +Ġr anks +201 1 +?? ?? +m ax +Ġcoll apse +Ġop ens +Ġe cho +Ġs oph +Ġrac ist +Ġenorm ous +Ġw aves +Ġt ap +Ġcomprehens ive +. -- +ĠR oy +Ġfarm ers +Rel ated +a ired +ron es +ĠC rim +Ġproport ion +Ġdesign s +Ġnegoti ations +Ġvirt ually +ĠBat man +Ġwar n +Ġlegit imate +m ate +Ġcon vention +, , +net ic +ĠS D +Ġconsist ently +Ġcompens ation +Ġpunish ment +Ġy e +Ġt ie +ĠB ureau +ir lf +ĠB u +ĠA ren +ĠPh ilipp +Ġkn ife +Ġmem ories +ĠR oss +Ġang le +Ġ8 6 +ĠTh under +Ġre nd +ĠT our +Ġcount s +s ung +ĠIm p +Ġeduc ational +Ġaccess ible +C OM +Ġd rew +y er +G l +am ine +OR T +O B +I B +m aster +Ġtri als +og y +h ar +ĠTr ust +Ġprefer red +irlf riend +ĠN ev +Ġb in +Ġc ow +P age +Ġsign ature +ĠB L +7 00 +Ġret ired +Ġby tes +Ġneigh b +ĠLeg end +Ġdev ast +Ġsuspect ed +is ons +ĠPoké mon +sc ale +Ġcap abilities +Ġre vel +Ġche ese +d y +igr ant +Ġfail ing +b its +ĠHer oes +ĠG host +ĠS cient +Ġappoint ed +ur i +Ġinst itution +Ġexpand ed +g reg +Ġmonitor ing +Ġp odcast +Ġcoal ition +Ġ9 6 +J o +Ġst olen +ĠS ab +Ġstop s +Ġhol iday +Ġint r +C ar +Bl ack +ĠL GBT +Ġwar ming +ĠAnd erson +Ġ8 9 +Ġprodu cer +M ed +Ġaccur acy +ĠMar vel +iz abeth +ĠPat rick +m ony +Ġmin i +ac les +Ġover t +the y +Ġmembers hip +ĠV en +Ġex ch +Ġrem oval +ĠD ave +T Y +m ad +ĠF ind +Ġad equ +Ġe c +Ġte eth +Ġemot ion +Ġper m +Ġsole ly +d b +Ġextra ord +IG HT +c al +Ġgu idelines +Ġd ying +Ġsusp ended +ĠPrem ier +ĠAnth ony +el ve +Ġd ad +ĠE th +ĠFoot ball +Ġabandon ed +Ġ< < +Ġm arch +Ġhor ror +âĢ¦ " +Ġchild hood +Ġcampaign s +Ġl unch +ĠAl bert +bl ock +âĸĪ âĸĪ +ound ing +Ġb one +or gan +ad ers +ĠFl ash +ĠDri ve +Ġton ight +Ġw ars +ĠF L +Ġform ation +con st +New s +Ġcom pe +or ious +ĠSt aff +Ġdiscuss ions +ĠProt ection +ĠJ am +Ġcrit eria +Ġinstall ation +Ġaccompl ish +iz za +Ġpub lisher +Ġresc ue +ĠT ry +U LL +ĠS om +ĠH op +ore t +th s +ord on +Ġp ocket +ĠIn v +Down load +ĠCr ime +Ġb ene +ĠGu ide +ĠAs sembly +Ġparam eters +I E +ĠAlex ander +Ġconc ert +ĠSc he +Ġsh oes +Ġvis iting +Ġrec all +Ġb ub +Ġr ural +Ġconc rete +ĠR os +N ext +R uss +Ġlo ans +ĠSh ield +Ġtre m +hem at +k g 
+ĠHar ris +is ition +ĠM ove +ĠF C +Ġf ate +ĠCh o +Ġt ired +Ġprinc ipal +h ist +ien ces +ath y +Ġse vent +Ġm ood +Ġstrateg ic +Ġdise ases +Ġfor um +Ġtem por +Ġhead quarters +P ar +ig e +fl ix +Ġgu itar +Ġ9 4 +On ly +Ġrele ases +ro ph +================ ================ +Ġ6 00 +ĠContin ue +ig ate +ĠC rit +sy stem +Ġdis abled +Ġunex pected +ith ub +Ġuncle ar +ĠE st +Ġcontr ad +Ġstrateg ies +vent ures +Ġpass age +AM E +Ġimpro ving +Ġreve als +Ġdecre ase +ov a +Ġann oy +ĠSh ort +ĠL ibrary +Ġcy ber +n ell +ĠH ur +ĠC B +Ġphot ograp +U I +Ġs ed +G e +Ġ8 7 +Ġd iverse +Ġencour aged +Ġcons piracy +Ġbird s +Ġoper ator +Ġhand ful +Ġclass ified +? ) +Ġdram atic +Ġinvestig ators +it o +Ġw idespread +ĠR oom +-------------------------------- -------------------------------- +Ġcollect ive +Ġjournal ist +St ring +Ġtemper atures +il a +Ġgu id +Ġins pect +Ġmiss ile +ĠMay or +Ġman ual +Ġsim ultane +Ġrat ings +Ġsu ck +Ġ9 7 +Ġunivers al +Ġph arm +Ġdis rupt +ian o +A V +Ġf t +Ġstat ist +old s +ĠWalk er +ph p +Ġunder t +ĠL as +ish op +nt il +res hold +ĠWhe ther +M s +Ġden y +ĠCl oud +Ġprov ider +Ġsurv iv +ĠUp date +h as +Ġmist akes +ch arge +pl ed +r ity +Ġn ode +ĠMass achusetts +ool s +lic ation +Ġf ails +em ale +or i +back s +Ġsh irt +Ġ' ' +ĠN AT +Ġwat ers +els on +Ġe ase +Ġsc ar +Ġcont ents +m ind +Ġcont ribution +Ġsh r +Ġhand ed +Ġst ability +Ġtra ve +E m +Ġmir ror +12 3 +Ġwe igh +Ġf iction +ou ver +ist ant +r ition +ĠF ed +Ġphys ically +Ġst ake +ĠArt icle +ĠAr c +ĠLew is +ĠM ind +Ġdemonstr ate +Ġprof its +v ision +om ic +ol id +Ġbatt les +Ġdri ves +Ġeas tern +ĠS ony +!! ! +ar ation +v ard +ĠG L +port ation +Ġ9 2 +Ġlaw makers +Ġprotect ing +ĠE PA +Ġy eah +Ġsh ame +ol ph +e ven +x it +Ġatt ach +Ġrepresent ing +Ġob s +ĠUt ah +iff s +ĠFre edom +à ³ +A K +Ġinc idents +it age +Ġview ers +c d +Ġm ouse +Ġcl ar +Ġaccord ance +Ġb ot +c or +ĠSum mer +he ld +Ġinnoc ent +Ġiniti ative +ol s +________________ ________________ +Ġsp ots +p ace +Ġconvent ional +Ġcorpor ations +Ġblock ed +H D +at tered +Ġref ers +Ġbu ck +ĠDig ital +12 0 +Ġtop ics +T F +Ä ģ +br id +re ement +Ġunder lying +ĠM ember +Ġinvestig ating +Ġpregn ancy +Ġtouch down +ĠB and +ĠCall er +Ġinst ances +P P +w a +G ood +Ġ199 1 +ĠC old +Ġfear s +Ġrem arks +Ĩ Ĵ +at al +Ġm it +Ġexper iments +i pt +Col or +ind u +Up date +Ġ9 3 +A g +Ġ å +anc ouver +B oth +Ġjud ges +Ob ject +Ġst ere +umb n +Ġparticip ation +ĠSt ars +ĠJ ere +Ġweek ly +ĠB an +Ġconvers ations +ĠP itt +u z +ĠIndian a +ĠK ick +Ġinf ection +Ġhero es +Ġsett led +Ġstri p +Ġh al +Ġd ump +ĠS ci +Ġl es +Ġref erences +ĠU RL +ĠBr idge +Ġwant ing +For ce +Ġex clus +Me anwhile +m n +Ġg entle +m aker +sen al +ĠG ro +ou ri +ĠR ain +ĠAll iance +Ġl ift +el a +S D +ĠCle veland +Ġrank ed +Ġst adium +Ġdead ly +ä ¸ +Ġr iding +ar ia +ĠAr mor +Ġdocument ation +ĠGree ce +ree k +Ġl ens +ĠS a +Ġg ross +ĠE mer +ag ers +ĠD ub +ĠR h +ĠAM D +Ġarri val +Ġdes ert +Ġsupp lement +ĠRes p +Ġkn ee +Ġmarg in +f ont +og g +201 0 +ĠP ir +ĠP rom +iv als +Ġint ake +Ġdifferent ly +ug s +Ġb its +clud ed +Ġsearch ing +ĠD u +um ble +Ġfunction al +ĠBalt imore +ĠC ould +Ġdes ired +Ġcirc uit +ĠL yn +ĠG O +ĠF alse +re pre +' : +alt ies +Ġmin im +Ġdro ve +ĠSh ould +Ġh ip +Ġpro s +Ġut ility +ĠN ature +ĠM ode +P resident +o pp +r at +form ance +Ġconcent ration +Ġf ont +ĠB ud +Ġam id +Ġre vers +ĠM L +B ar +Ġinter action +Ġjur isd +Ġspell s +d ep +f il +Ġcivil ians +ut ter +ĠCo oper +ĠBel ow +Ġent rance +Ġcon vert +Ġcontrovers y +ow ered +Ġcontr ary +Ġar c +ĠExec utive +ĠOffic er +Ġpack ages +Ġprog ressive +w idth +Ġreserv ed +v ol +ĠSam sung +Ġprint ed 
+Ġcent ers +Ġintrodu ce +ĠKenn edy +Ġodd s +Ġsure ly +Ġindepend ence +Ġpass engers +repre ne +ĠBe h +Ġl oves +ĠESP N +Ġfac ilit +Ġident ical +Ġdo ct +Ġpartners hip +con f +ĠH ide +Ġconf used +ĠC ow +M en +Ġw rest +ĠIraq i +Ġh oles +ĠStud ies +Ġpregn ant +h ard +Ġsign als +I X +Ġpull ing +Ġgrad uate +Ġnomine e +D ate +Ġper mitted +Ġâ Ĥ¬ +ĠOk lahoma +St art +Ġauthor ized +Ġal arm +ĠC os +v an +Ġgener ations +c ular +Ġdr agon +ĠSoft ware +ĠEd ward +Ġcontro ller +S en +ge red +ĠV ik +Ġappro ached +Th ank +Ġcan ce +Ġform ula +ĠSm all +Ġweak ness +Ġr amp +it udes +j ud +Ġbrill iant +Ġacc us +s ource +Ġ8 00 +ĠE vil +S w +Ġhom eless +we ek +i ens +r ics +ĠTh ird +T O +Ġorgan ic +Ġpresent ation +ag h +ĠDown load +v ation +Ġas sembly +or able +hold ers +ĠBern ie +ĠHel p +Ġt ong +ĠF ight +Ġbe ach +B ook +ĠL ic +Ġr ush +ĠR ound +ou p +ĠMar x +Ġcalcul ated +ĠDe vil +ĠSar ah +Ġoccasion ally +Ġbul let +Av ailable +g ate +Ġ9 1 +Ġh osp +Ġprom ises +ĠH IV +ĠSt adium +ĠSt ock +ĠCorpor ation +g age +N G +ĠC redit +Ġs ne +ib l +Ġacc um +s uch +Ġterror ists +Ġconscious ness +ĠZ h +Ġdram a +ool a +pir ation +Ġlab our +ĠN in +Ġut ter +Ġdemocr atic +Ġass ass +il ation +Ġg est +Ġab road +Ġmet ab +Ġs orts +Ġfl av +U B +Ġm g +ĠNot hing +ĠO d +Ġmus ical +200 9 +Ġdro ps +oc ated +ater al +0000 00 +Ġg re +Ġequ ality +Ġburd en +Ġv ig +ĠLe ader +-------- ---- +Ġcere mony +Ġf ighter +Ġact ors +Ġ æ +am an +F i +Ġal ign +put er +Ġe lder +ĠN SA +Ġrepresent ation +ĠOnt ario +IT H +usal em +Ġharass ment +itz er +Ġsy mp +Ġbox es +ĠD R +Ġman ifest +at re +Ġ ^ +Ġd ies +le ton +Ġmiss ions +et he +Ġres olve +Ġfollow ers +Ġas c +Ġk m +l ord +am med +Ġsil ent +ĠAssoci ated +Ġtim ing +Ġprison ers +ĠK ings +ĠF ive +Ġtow er +Ġappro aches +Ġprecise ly +Ġb ureau +ĠM other +ĠI ss +Ġkey board +it ual +Ġfund ed +Ġstay ing +Ġpsych ological +Ġm ile +ĠLe on +ĠBar b +w ill +Ġw ider +ĠAtl antic +Ġt ill +ĠR ome +ro t +Ġaccomp an +Ġfl our +ac o +W orld +ĠExp ress +ĠY u +C or +Ġple ased +part y +Ġpoint ing +Ġinf lation +Ġro y +Ġ ), +ain er +Ġwedd ing +orm on +Ġrequ iring +Ġqual ified +Ġse gment +EN D +Ġs izes +e als +Ġcor rupt +ass ador +Ġcele b +Ġdream s +ĠM ess +Ġcheck ing +ĠV ersion +Ġprep aring +Ġact ively +ĠD iff +Ġl ux +ĠW inter +act eria +ĠN E +Ġdep uty +Ġtrans gender +Ġsum mary +Ġin her +er ies +ch ar +ĠY an +Ġkn ock +ĠP ath +Ġl ip +roll er +Ġimp ression +Ġcelebr ate +Ġsl ide +Ġgu ests +Ġcl ip +F S +Ġsav ings +Ġcapt ain +Ġleg acy +ĠDen ver +Ġw ounded +tab oola +AC T +Ġpurs ue +Ġo xy +Ġ q +Ġsem i +ĠN eed +ĠAff airs +Ġob sc +Ġcheck ed +Ġd ual +C ode +ĠM D +le m +ult y +Ġ © +ĠEl izabeth +Ġcent uries +ard ed +s rc +Ġev ident +enn is +at in +Ġunemploy ment +ĠMar io +Ġint im +Ch rist +Ġbi ological +Ġsold ier +ĠAdd ed +Ġm ath +ĠG il +Ġbi as +Ġd ating +ĠO cean +Ġm ice +M us +h ire +ĠT es +Ser ver +lim ited +S ize +Ġmet ers +Ġrock et +es see +Ġcertific ate +ĠIran ian +AS S +Ġgr id +D ec +Ġro lling +com mun +ĠSwed en +b ury +Ġtiss ue +Ġrac ism +ĠL ocal +Ġmyster y +Ġexam ine +Ġst em +Ġs its +Ġhop ed +ot ing +Ġdial ogue +Ġpers u +W atch +l ay +M AN +Ġch ronic +ĠPort land +mark et +ĠS EC +Ġparalle l +Ġsc andal +Ġcar ries +Ġphenomen on +h uman +ack er +ĠO x +Ġretire ment +tain ment +ov ie +ĠG ear +Ġd uties +Ġdo se +Ġsc roll +M B +in f +Ġsa uce +Ġland scape +red dit +ĠChampions hip +ĠRed dit +al id +Ġco in +Ġover s +Ġpost ing +ab out +Ġf el +and y +Ġb old +Ġfocus ing +e ffect +G R +Ġde emed +Ġrecommend ations +Ġste pped +Ġvot er +ĠDe ep +ĠInst agram +Ġmoder ate +ĠMary land +Ġrestrict ed +ĠM B +ĠCh all +Ġto b +Ġc ir +ĠO cc +ĠE ver +Ġcoll aps +IN FO += - +ĠP 
ict +ĠAcc ount +n c +Ġo ught +Ġex port +Ġdr unk +( ' +Ġw ise +ĠM ort +ne cess +Ġan cest +ĠInc re +Ġfrequ ent +m ir +Ġinterpret ation +Ġdepend ent +Ġco ins +ĠB ol +V ideo +ĠJust in +Ġfat al +Ġcook ing +Ġconf usion +ip her +Ġcust ody +ĠMor gan +om ach +ĠGovern or +Ġrestaur ants +el ing +Ġacknowled ged +Ġthe r +Ġgen es +ch ing +He y +Ġtact ics +ĠMex ican +Ġv end +Ġhe s +qu er +Ġnot ing +ĠCamer on +Ġtarget ing +ro ck +Ġcred its +Ġemot ions +Ġrepresent atives +new s +Ġlegisl ative +Ġrem oving +Ġtweet ed +ĠCar ter +ĠF ixed +Ġfor cing +Ġspeak er +Ġm ales +ĠViet nam +l ined +Ġconcept s +Ġvo ices +o ir +ĠT rib +W he +ĠJer usalem +ĠS ant +Ġc ul +Ġl ady +ĠHaw ai +Ġar ts +ĠIn n +ĠMach ine +ĠEm peror +Ġsl ot +g ly +ĠPro cess +II I +Ġathlet es +ĠTem ple +ĠRep resent +Ġpres c +Ġt ons +Ġgold en +Ġp unch +ĠG R +iver pool +Ġen act +Ġlob by +Ġm os +Ġpick ing +Ġlif etime +Ġcogn itive +E ach +z o +Ġd ub +Ġcons ists +ol n +Ġf estival +am ous +Ġint ellig +w ords +ĠSm art +Ġde le +Ġl apt +Ġmag ical +ĠS in +b us +ur ities +igh th +ĠRub y +ĠS ure +ol ving +Ġj un +O ST +Ġimp osed +Ġast ron +Ġcor rel +ĠN S +ĠK it +ĠF uture +b urn +Ġimm une +oc us +Ġcour ses +ĠSt ring +Ġle an +Ġg host +Ġout comes +Ġexp ense +Ġevery day +Ġaccept able +A h +Ġequ ipped +Ġor ange +F R +ĠD utch +Th ough +ĠR ank +Q U +ĠRober ts +wh at +re nd +Ġdisapp ear +Ġsp awn +ĠL am +o is +Ġdes erve +Ġmin imal +Ġnerv ous +ĠW ould +Ġro ok +ĠV ancouver +Ġres ign +sh ire +ĠW orks +ĠB uild +Ġafford able +ĠG ary +ĠAren a +Ġh anging +Ġimpl ications +ĠS ong +Ġmain taining +Ġgu ards +C ON +Ġder ived +Ġexecut ed +Ġthe ories +Ġqu oted +ĠAnd re +og a +sel ess +in fo +ĠBel g +Ġt ears +ĠSur v +Ġbirth day +ig ious +im mer +Ġspect rum +Ġarchitect ure +Ġrec ruit +arm a +T able +Ġmon sters +ĠG ov +Ġdest ination +Ġattract ive +Ġf oss +ĠMore over +Ġpres ents +TH E +Ġrep ly +pt on +Ġc um +Ġdel ight +Ġaffect s +Ġdon ations +ĠT oy +ĠH im +M ENT +Ġover come +it ched +ĠFant asy +ĠH at +ĠBe ast +b ott +Ġinvestig ations +R un +Ġhun ting +d i +f und +Ġs essions +est yle +Ġport ray +oid s +Y eah +Ġcommun icate +Ġcom edy +ĠY ang +Ġbel t +ĠMar ine +Ġpredict ed +Pl ay +Ġimportant ly +Ġremark able +Ġelim inate +D avid +Ġb ind +V ID +Ġadvoc ates +ĠG aza +im p +D B +ĠN a +ĠSim ilar +I ES +Ġchar ity +v as +m ath +Ġâ ĸ +ok er +nd um +Ġcap s +ĠH al +2 000 +e an +Ġfle et +Ġrec re +R ight +Ġsleep ing +ij ing +k ind +Ġdesign ated +à ¤ +Ġanim ation +ke e +ĠInt rodu +Ġ/ > +Ġdelay ed +Ġtrem end +Ġcur ious +U se +Ġle ct +d am +Ġinnov ation +ĠPoint s +Ġload ing +Ġdisp ute +ct ic +ird s +ĠB Y +Ġn urs +ĠVal ue +ION S +ĠH um +Ġtem plate +m ers +Ġappear ances +ĠEnter tainment +Ġtransl ation +Ġsa ke +Ġbene ath +Ġin hib +Ġe uro +abet es +Ġstud ying +ĠM as +Ġper ceived +Ġexam ined +Ġe ager +Ġco aches +Ġim per +ch i +Ġprodu ces +" ). 
+ĠEvery one +Ġm unicip +Ġg irlfriend +Ġh ire +ĠV ice +Ġsu itable +op y +Ġin equ +ĠD uke +f ish +f irst +ĠO bs +Ġinter ior +ĠBru ce +ĠR y +Ġanal ys +Ġconsider able +Ġfore cast +Ġf ert +ors hip +ĠD rug +ĠA LL +: " +th ur +ĠM ail +Ġball ot +Ġinst antly +ĠCh annel +Ġp icks +Ġ198 9 +Ġt ent +ol i +Ġcivil ian +b ling +ell o +b u +Ġin ch +Ġlog o +Ġcooper ation +Ġwal ks +Ġinvest ments +Ġimp rison +ĠF estival +ĠK y +Ġleg ally +Ġg ri +ch arg +S l +Ġthreat ening +du ction +fl ow +Ġdismiss ed +ibr aries +c ap +e le +ĠMc G +ĠHar vard +ĠConserv ative +ĠC BS +p ng +Ġro ots +ĠH aving +umb led +ĠF un +\ / +ĠS earch +ple x +Ġdiscuss ing +Ġcontin u +ĠT ai +ĠW ik +F ree +f it +Ġref use +Ġmanag ing +Ġsy nd +ip edia +w alk +Ġprofession als +Ġguid ance +Ġunivers ities +Ġas semb +unt u +F inally +AS E +ĠAut o +ĠH ad +Ġann iversary +L D +ĠD ur +ĠUlt imate +ih ad +pro duct +Ġtrans it +Ġrest ore +Ġexpl aining +Ġass et +Ġtransfer red +Ġbur st +ap olis +ĠMag azine +ĠC ra +ĠB R +gg ed +ĠH E +M ich +b et +ĠL ady +yl um +erv es +Ġme ets +wh ite +L og +Ġcorrespond ing +Ġins isted +G G +Ġsurround ed +Ġt ens +Ġl ane +Ġco inc +h ome +Ġexist ed +ect ed +ĠDou ble +lam m +Ġske pt +ex p +Ġper ception +ie v +ĠBe ing +o ft +Ġadop t +. : +] ; +Wind ows +Ġsatell ite +AS H +Ġinf ant +d escription +ĠMe anwhile +c m +oc a +ĠT reat +act or +Ġtob acco +ĠN orm +em ption +Ġfl esh +Ġj e +o op +ĠHe aven +Ġbe ating +an im +Ġgather ing +Ġcult iv +G O +ab e +ĠJon athan +ĠSaf ety +Ġbad ly +pro t +Ġcho osing +Ġcontact ed +Ġqu it +Ġdist ur +Ġst ir +Ġto ken +D et +ĠP a +Ġfunction ality +00 3 +s ome +Ġlimit ations +Ġmet h +b uild +con fig +N T +re ll +ble m +ĠM om +Ġveter ans +ĠH u +Ġtrend s +are r +ĠG iven +ĠCa ption +m ay +AS T +Ġwond ering +ĠCl ark +n ormal +Ġsepar ated +Ġdes p +st ic +b rew +Ġrel ating +ĠN ik +ĠF arm +Ġenthus i +g ood +d eb +Ġactiv ist +Ġm art +Ġexplos ion +ĠEconom ic +L ink +Ġins ight +Ġconven ient +Ġcounter part +su pport +ĠV irt +ag en +ĠTenn essee +ĠSim on +ĠA ward +OC K +ĠF igure +Ġoverse as +Ġpr ide +ĠC as +n ote +m g +C urrent +Ġdispl ays +cont ent +Ġtravel ing +Ġhosp itals +ĠFin ancial +ĠP ast +Ġdefend ant +Ġstream ing +m ble +ĠBer lin +uk i +Ġdist ribut +Ġant ib +Ġch ocolate +ĠCast le +Ġinter rupt +ĠR ow +Ġconvers ion +Ġbug s +ĠR ather +li est +L Y +ĠJe an +com mon +ak h +Ġ1 30 +ot ton +ĠDe an +Ġam endment +Ġgame play +ĠWar ren +od a +Ġhigh lights +Ġir re +ĠNAT O +Ġball s +Ġdemand ing +U RE +ĠL uke +F igure +st op +on ia +z one +iz ers +ĠW R +Ġaward ed +Ġregul atory +ĠH art +ĠS N +pl ing +Ġs our +ĠP ixel +us ive +Ġf et +ĠS ent +Ġautom atic +Ġf er +vern ment +ĠKh an +T ON +f ather +Ġextraord inary +th rop +ĠP ython +ĠG PU +Ġsex ually +Ġdesk top +it ivity +ĠAnton io +Ġo rient +Ġe ars +ob by +ous es +vertis ements +Ġmanufacture rs +ic ient +min ute +Ġconv iction +Ġg arden +p ublic +Ġsatisf ied +f old +O K +Ġin hab +ĠTh ink +Ġprogram me +Ġst omach +Ġcoord in +Ġh oly +Ġth reshold +Ġr het +Ġser ial +Ġemploy ers +ĠEvery thing +ra h +Ġb other +Ġbr ands +Val ue +ĠT ed +ĠPlan et +Ġp ink +ĠFurther more +s a +P E +re ck +ĠUS D +ot te +Ġ& & +Ġland ed +g ets +Ġprodu cers +Ġhealth care +Ġdomin ant +Ġdest ro +Ġam ended +ch ron +Ġf its +ĠSy d +ĠAuthor ity +AT CH +Ġfight s +ĠL LC +Ġ-- - +ĠCor p +Ġtox ic +spe cific +ĠC orn +ĠChe l +Ġtele phone +ĠP ant +Ġmyster ious +aun ch +od ox +med ia +Ġwitness es +ag u +Ġquestion ed +ĠBre xit +ĠRem ember +ene z +Ġend orse +iat ric +ĠId ent +Ġridic ulous +1 10 +Ġpr ayer +Ġscient ist +Ġ19 50 +ĠA qu +Ġunder ground +ĠU FC +m are +ĠL ater +w ich +Ġsubsc rib +Ġhost s +Ġer r +Ġgr ants +ant om +Ġsum mon +ear 
ly +ĠC lear +ĠPr im +Ġsusp ension +Ġguarant eed +app er +Ġr ice +ĠSe an +ĠSh in +Ġrefere ndum +Ġfl ed +r ust +Ġ3 60 +ter y +Ġsh ocked +B R +ĠO il +ĠAll ah +Ġpart ly +Ġign or +Ġtrans mission +Ġhom osexual +ivers al +Ġhop efully +ãĤ ¤ +Ġless on +L eg +Ġ .. +Y et +t able +app ropri +re tt +Ġbo ards +Ġincor rect +Ġb acteria +ar u +am ac +Ġsn ap +.' " +Ġpar ad +t em +he art +Ġav ailability +Ġw isdom +Ġ( + +Ġpri est +ĠÂł ĠÂł +O pen +Ġsp an +Ġparam eter +Ġconv ince +Ġ( %) +r ac +Ġf o +Ġsafe ly +Ġconver ted +ĠOlymp ic +Ġres erve +Ġhe aling +ĠM ine +M ax +Ġin herent +ĠGra ham +Ġinteg rated +D em +Ġpip eline +Ġapp lying +Ġem bed +ĠCharl ie +Ġc ave +200 8 +Ġcons ensus +Ġre wards +P al +ĠHT ML +Ġpopular ity +look ing +ĠSw ord +ĠAr ts +' ) +Ġelect ron +clus ions +Ġinteg rity +Ġexclus ively +Ġgr ace +Ġtort ure +Ġburn ed +tw o +Ġ18 0 +P rodu +Ġent reprene +raph ics +Ġg ym +ric ane +ĠT am +Ġadministr ative +Ġmanufacture r +Ġ vel +ĠN i +Ġisol ated +ĠMedic ine +Ġback up +Ġpromot ing +Ġcommand er +Ġfle e +ĠRus sell +Ġforg otten +ĠMiss ouri +Ġres idence +m ons +Ġrese mb +Ġw and +Ġmeaning ful +P T +Ġb ol +Ġhe lic +Ġwealth y +Ġr ifle +str ong +row ing +pl an +as ury +âĢ¦ . +Ġexpand ing +ĠHam ilton +Ġrece ives +S I +eat ures +ĠAn im +RE E +P ut +Ġbrief ly +ri ve +Ġstim ul +Ġ`` ( +Ġ __ +Ġch ip +Ġha z +Ġpri ze +ĠTh ings +AC E +ul in +d ict +ok u +Ġassoci ate +ock ets +y outube +St ory +ateg ory +Ġm ild +ail ing +ĠY e +O rig +ĠK a +or ig +Ġpropag anda +Ġan onymous +Ġstrugg led +Ġout rage +AT ED +ĠBe ijing +r ary +Ġle ather +Ġworld s +Ġbroad er +12 5 +id al +ĠBet ter +Ġt ear +E xt +Ġpropos als +Ġit er +ĠSqu ad +Ġvol unt +m i +D id +ĠP u +p in +Ġspeak ers +Ġb orders +Ġfig ured += ' +Ġsimultane ously +aed a +Ġcharg ing +Ġur ged +Ġcon j +25 6 +ĠG ordon +mer ce +Ġdocument ary +Sh are +it ol +ON E +ĠG arden +h att +ĠThom pson +ane ous +ap ore +Ġt anks +Ġless ons +tr ack +Ġout standing +Ġvolunte ers +Ġsp ray +Ġmanag ers +l arge +Ġcamp s +Ġart ificial +ĠR u +Ġb ags +th al +Ġcompat ible +ĠBl ade +Ġf ed +Ġarg ues +F I +Ġunf air +Ġcor n +Ġoff set +Ġdirect ions +Ġdisappoint ed +ĠCon vention +Ġview ing +M E +oc ity +Ġtown s +Ġlay ers +Ġro lled +Ġjump ed +Ġatt ribute +Ġun necess +inc oln +Ġsupp ose +ĠNet her +ch a +Ġbur ied +Ġsix th +B en +ress ing +OU R +Ġw ound +Ġcy cl +Ġmechan isms +Ġcongress ional +ĠE lement +Ġagre ements +Ġdec or +Ġclos est +ĠM it +Go ogle +} } +Ġm ixture +Ġflu id +S ign +ĠSch olar +Ġp ist +ask et +ab ling +Ġrac ing +he ro +ri el +ass y +Ġche aper +b en +Ġvert ical +amac are +ĠRead ing +g ments +Ġhelic op +Ġsacr ifice +ay a +p aren +V A +ĠL es +ĠStud io +Ġviol ations +ĠAn na +ac er +é ¾ +ĠR at +ĠBe ck +ĠD ick +ĠA CT +Ġcomp osition +Ġtext ure +ĠO wn +Ġsmart phone +ĠN A +Ġfor b +im port +Ġdef ending +il st +re r +Ġo h +ĠJere my +Ġbank ing +cept ions +Ġrespect ive +/ . 
+Ġdr inks +ĠW i +Ġb ands +ĠL iverpool +Ġg rip +ĠB uy +Ġopen ly +Ġreview ed +per t +Ġver ify +ĠCo le +ĠW ales +M O +Ġun pre +Ġshel ter +ĠIm perial +Ġgu i +ĠD ak +Ġsuggest ions +Ġexplicit ly +Ġsl ave +Ġblock chain +Ġcompet ing +Ġprom ising +S ON +Ġsoc cer +Ġconst itution +4 29 +Ġdist ract +ĠU ser +es ides +ĠMet hod +ĠTok yo +Ġaccompan ied +Cl ient +s ur +al og +Ġident ification +Ġinv asion +as ma +Ġindust ries +pp ers +Ġsub tle +ĠUn it +n atural +Ġsurv ived +Ġfl aw +ĺ ħ +ĠH oll +Ġdef icit +Ġtut orial +ĠCh ance +Ġarg uing +Ġcontem porary +Ġinteg ration +for ward +Ġt um +it is +Ġh iding +ĠD omin +ĠT an +ĠB uilding +ĠV in +Ġspokes person +ĠNot es +Ġemer ging +Ġprepar ation +Ġpro st +Ġsuspect s +Ġaut onom +D escription +Ġdeal t +ĠP ear +Ġstead y +Ġdecre ased +Ġso vere +ĠCl in +Ġgrad ually +ors es +ĠW AR +S erv +ãĤ ¢ +h r +Ġd irty +ĠB arn +ĠB C +Ġd il +Ġcal endar +Ġcompl iance +Ġch amber +b b +Ġpass enger +ate ful +ĠT itle +ĠSyd ney +ĠG ot +Ġdark ness +Ġdef ect +Ġpack ed +ass ion +Ġgod s +Ġh arsh +IC K +le ans +Ġalgorith m +Ġoxy gen +Ġvis its +Ġbl ade +Ġkil omet +ĠKent ucky +Ġkill er +P ack +enn y +Ġdiv ine +Ġnom ination +be ing +Ġeng ines +Ġc ats +Ġbuff er +ĠPh ill +Ġtra ff +AG E +Ġtong ue +Ġrad iation +ere r +m em +ĠExpl icit +é¾ į +Ġcou ples +Ġphys ics +ĠMc K +Ġpolit ically +aw ks +ĠBl oom +Ġwor ship +e ger +ut er +ĠF O +Ġmat hemat +Ġsent enced +Ġdis k +ĠM arg +Ġ/ * +P I +Ġoption al +Ġbab ies +Ġse eds +ĠScott ish +Ġth y +] ] +ĠHit ler +P H +ng th +Ġrec overed +ing e +Ġpow der +Ġl ips +Ġdesign er +Ġdis orders +Ġcour age +Ġch aos +" },{" +Ġcar rier +b ably +H igh +ĠR T +es ity +l en +Ġrout es +u ating +F il +N OT +w all +s burgh +Ġeng aging +ĠJava Script +ore r +li hood +Ġun ions +ĠF ederation +ĠTes la +Ġcomple tion +ĠT a +Ġprivile ge +ĠOr ange +Ġne ur +paren cy +Ġb ones +Ġtit led +Ġprosecut ors +ĠM E +Ġengine er +ĠUn iverse +ĠH ig +n ie +o ard +Ġheart s +ĠG re +uss ion +Ġmin istry +Ġpen et +ĠN ut +ĠO w +ĠX P +in stein +Ġbul k +S ystem +ic ism +ĠMarket able +Ġpre val +Ġpost er +Ġatt ending +ur able +Ġlicens ed +ĠG h +et ry +ĠTrad able +Ġbl ast +à ¤ +ĠTit an +ell ed +d ie +H ave +ĠFl ame +Ġprof ound +Ġparticip ating +Ġan ime +ĠE ss +Ġspec ify +Ġregard ed +ĠSpe ll +Ġs ons +own ed +Ġm erc +Ġexper imental +land o +h s +ĠDun geon +in os +Ġcomp ly +ĠSystem s +ar th +Ġse ized +l ocal +ĠGirl s +ud o +on ed +ĠF le +Ġconstruct ed +Ġhost ed +Ġsc ared +act ic +ĠIs lands +ĠM ORE +Ġbl ess +Ġblock ing +Ġch ips +Ġev ac +P s +Ġcorpor ation +Ġo x +Ġlight ing +Ġneighb ors +ĠU b +ar o +Ġbe ef +ĠU ber +F acebook +ar med +it ate +ĠR ating +ĠQu ick +Ġoccup ied +Ġaim s +ĠAdd itionally +ĠInt erest +Ġdram atically +Ġhe al +Ġpain ting +Ġengine ers +M M +ĠM ust +Ġquant ity +P aul +Ġearn ings +ĠPost s +st ra +ãĥ¼ ãĥ +Ġst ance +Ġdro pping +sc ript +Ġd ressed +M ake +Ġjust ify +ĠL td +Ġprompt ed +Ġscr ut +Ġspeed s +ĠGi ants +om er +ĠEd itor +Ġdescrib ing +ĠL ie +ment ed +Ġnow here +oc aly +Ġinst ruction +fort able +Ġent ities +Ġc m +ĠN atural +Ġinqu iry +Ġpress ed +iz ont +for ced +Ġra ises +ĠNet flix +ĠS ide +Ġout er +Ġamong st +im s +ows ki +Ġclim b +ne ver +Ġcomb ine +d ing +Ġcomp r +Ġsignific ance +Ġremem bered +ĠNev ada +ĠT el +ĠSc ar +ĠWar riors +ĠJ ane +Ġcou p +b as +Ġtermin al +, - +O H +Ġt ension +Ġw ings +ĠMy ster +�� �� +ĠUn like +val id +viron ments +ĠAl i +Ġn aked +book s +ĠM un +ĠG ulf +Ġd ensity +Ġdim in +Ġdesper ate +Ġpres idency +Ġ198 6 +h y +IN D +Ġun lock +im ens +Ġhand led +ĠE b +Ġdisapp eared +Ġgen re +Ġ198 8 +Ġdetermin ation +St ream +ik o +ap ters +Ġacknow ledge +J an +Ġcapital ism +P at +Ġ20 20 +Ġpain 
ful +Ġcur ve +Ġbom bs +st orm +ĠMet al +en cer +ĠF ig +ĠA aron +anc hes +Ġins piration +Ġexha ust +t ains +ash i +Ġdesc ript +Ġr itual +ĠChel sea +Ġpromot ion +ĠH ung +ĠW ard +iv a +ĠE T +Ġto ss +all ow +ĠFranc is +D ep +Ġhapp iness +ĠGl ass +Ġbet a +Ġstreng then +N E +o a +Ġbutt ons +ĠMur ray +Ġkick ed +Qu est +ĠT alk +ĠS everal +ĠZ ero +Ġdr one +ul k +Ġc am +ĠM obile +Ġprevent ing +Ġret ro +ĠA x +Ġcru el +Ġflo at +. ), +Ġfil ing +ĠGr ant +ĠB or +Ġr ib +Ġchampions hip +ĠM erc +Ġsty les +Ġc ake +Ġbuild s +ĠS elf +io x +Ġep ic +oy d +B el +ĠSt ew +. ( +ah u +ĠBe yond +Ġout s +Ġsol o +ĠT ree +Ġpres erve +Ġt ub +AR E +ro c +ĠIm pro +ĠW right +Ġbu nd +Ġtr aged +Ġoccas ional +b ian +Sec ond +r ons +Ġinter actions +form ed +s ing +Ġown s +Ġh ockey +Gener al +Ġlog ical +Ġexp end +Ġesc al +ĠGr iff +ĠC rown +ĠRes erve +Ġsto pping +Ġexc use +sec ond +Ġoper ated +Ġre aches +ĠMal ays +Ġpoll ution +ĠBrook lyn +Ġde lete +Ġhas h +Bl ock +ah a +âĢ ³ +Ġsh orter +p iece +> >> +ĠM ormon +t or +Ġpartic les +ĠB art +ry ption +Ġad min +Ġsqu ee +VID IA +Ġcreat or +iam eter +ic ular +N BC +Ġgrab bed +Ġn odd +Ġr ated +Ġrot ation +Ġgr asp +Ġexcess ive +ĠE C +ĠWh it +Ġinvent ory +ault s +ĠF B +Ġe cosystem +Ġbill ions +Ġvent ure +n amed +Ġdef ender +out e +Inst ead +ir able +W ar +Ġassum ption +Ġb ite +Ġearth qu +t ail +sp ace +Ġgif ts +boy s +Ġinev itable +Ġstruct ural +Ġbenef icial +Ġcompe lling +h ole +erv ation +Ġco at +o j +inc arn +ĠY ears +Ġdetermin ing +Ġrhet oric +Ġbound aries +Ġwh ites +A nt +add y +) - +ra ham +eter min +Ġhar vest +ĠCon c +Ġlapt op +ĠM atch +Ġenjoy ing +cc a +oll ar +Ġtri ps +Ġadd iction +ĠS ak +Ġpow ered +Ġc ous +ĠRuss ians +ie re +Ġret rie +qu ality +Ġdiff er +Ġking dom +ĠL aur +ĠCap itol +Ġcon clusions +ĠAl tern +ĠN av +Ġtrans parent +B ER +G roup +ĠCom plete +Ġinf er +Ġint rig +Ġins ane +R O +oph ob +is en +qu al +Mich ael +Ġm useum +ĠP ope +Ġres et +r ative +f ive +Ġagg reg +itte es +osit ory +Ġcar b +ĠRec ord +Ġdec ides +ĠF ix +Ġexcept ions +ĠCommission er +un s +ĠEnvironment al +Ġlegend ary +ist ence +Ġtun nel +k m +Ġins ult +Ġt roll +Ġsh ake +Ġdet ention +qu es +ĠCh rome +ĠF iles +Ġsub t +Ġprospect s +Ġpro l +re nder +pro of +Ġperform ances +St r +Ġh ref +ern ame +Ġachieve ment +Ġf ut +F ull +ĠLe ban +go ogle +ãĥ Ī +amp a +May be +Ġproject ed +ĠE mb +Ġcol leg +Ġa wards +Ġâ Ķ +G old +ĠBl ake +ĠR aj +if ting +Ġp ending +Ġinst inct +Ġdevelop ments +Con nect +ĠM and +ĠW ITH +ĠPhilipp ines +prof ile +Ġalt ogether +ĠB und +ĠT D +oo oo +amp ed +ip h +Ġste am +Ġold est +Ġdet ection +ul pt +Ġ ç +ĠWay ne +200 6 +f a +Ġcir cles +ĠF u +Ġdon ors +appropri ate +ĠDak ota +j amin +Ġmotiv ated +Ġpurch ases +ĠLouis iana +ĠS pl +Ġgl obe +Ġ10 5 +z ip +c all +Ġdepart ments +Ġsustain able +10 5 +ĠO P +if iers +Ġprevent ed +Ġinc omp +ĠComm ander +Ġdom inated +Ġ » +Ġinvest ed +Ġcomplex ity +Ġin cl +Ġens uring +Ġreal m +yn c +ĠInd ependent +r ained +ĠJ en +ĠFl ight +Ġat he +Ġspec ulation +ĠT E +oc ate +t ic +Ġpl aint +her ry +Ġto y +Ġ1 11 +Ġpl ates +st atus +ĠIs a +Ġdev oted +C op +ĠE S +25 5 +ur rency +M ain +Ġsl aves +Ġpe pper +Ġqu otes +Ġce iling +ĠF ish +Ġtrans formation +Ġfra ction +Ġadvant ages +Ġto ile +Ġstun ning +Ġmo ist +bre aking +s i +ĠL ocation +ĠMed ium +Ġtext s +Ġu gly +Ġb io +. 
âĢĶ +ĠB ased +Ġtr ains +ĠW ing +ĠAn cient +ĠRec ords +ĠH ope +Spe cial +ades h +ob i +[ / +Ġtempor arily +V er +h u +os er +Ġover night +Ġm amm +ĠTre asury +ĠV enezuel +ĠMeg a +Ġt ar +Ġexpect s +bl ack +or ph +\\ \\ +Ġaccept ance +Ġrad ar +s is +Ġjun ior +Ġfram es +Ġobserv ation +ac ies +P ower +ĠAdv anced +M ag +olog ically +ĠMe chan +Ġsent ences +Ġanaly sts +augh ters +force ment +Ġv ague +Ġcl ause +Ġdirect ors +Ġeval uate +Ġcabin et +M att +ĠClass ic +A ng +Ġcl er +ĠB uck +Ġresear cher +Ġ16 0 +Ġpoor ly +Ġexperien cing +ĠP ed +ĠMan hattan +Ġfre ed +Ġthem es +ad vant +Ġn in +Ġpra ise +10 4 +ĠLib ya +b est +Ġtrust ed +Ġce ase +Ġd ign +D irect +Ġbomb ing +Ġm igration +ĠSci ences +Ġmunicip al +ĠA verage +Ġgl ory +Ġreve aling +Ġare na +Ġuncertain ty +Ġbattle field +ia o +G od +Ġc inem +ra pe +el le +ap ons +Ġlist ing +Ġwa ited +Ġsp otted +ke ley +ĠAud io +e or +ard ing +idd ing +ig ma +ĠN eg +Ġl one +Ġ ---- +ex e +d eg +Ġtrans f +Ġwas h +Ġsl avery +Ġexpl oring +ĠW W +ats on +Ġen cl +l ies +ĠC reek +Ġwood en +Man ager +ĠBr and +um my +ĠAr thur +Ġbureau cr +Ġbl end +ar ians +F urther +Ġsupposed ly +Ġwind s +Ġ19 79 +Ġgrav ity +Ġanalys es +ĠTra vel +ĠV eter +Ġd umb +Ġaltern ate +g al +Ġconsum ed +Ġeffect iveness +.' ' +Ġpath s +ond a +L A +ĠStr ong +Ġen ables +Ġesc aped +Ġ" " +Ġ1 12 +Ġ198 3 +Ġsm iled +Ġtend ency +F ire +Ġp ars +ĠR oc +Ġl ake +Ġf itness +ĠA th +ĠH orn +Ġh ier +Ġimp ose +m other +Ġp ension +ic ut +bor ne +ic iary +. _ +ĠS U +Ġpol ar +is y +eng u +itial ized +AT A +w rite +Ġexerc ises +ĠD iamond +ot ypes +Ġharm ful +on z +Ġprint ing +st ory +Ġexpert ise +ĠG er +Ġtraged y +ĠF ly +Ġd ivid +amp ire +st ock +M em +Ġre ign +Ġun ve +Ġam end +ĠProp het +Ġmut ual +ĠF ac +Ġrepl acing +H ar +ĠCirc uit +Ġthro at +ĠSh ot +Ġbatter ies +Ġto ll +Ġaddress ing +ĠMedic aid +Ġp upp +ĠN ar +ol k +Ġequ ity +M R +ĠHis pan +ĠL arge +m id +D ev +Ġexp ed +Ġdem o +ĠMarsh all +erg us +Ġf iber +Ġdiv orce +ĠCre ate +Ġsl ower +ĠPark er +ĠStud ent +ĠTr aining +Ret urn +ĠT ru +Ġc ub +ĠRe ached +Ġpan ic +Ġqu arters +Ġre ct +Ġtreat ing +Ġr ats +ĠChristian ity +ol er +Ġsac red +Ġdecl are +ul ative +et ing +Ġdeliver ing +est one +Ġt el +ĠL arry +Ġmet a +ac cept +art z +ĠRog er +hand ed +Ġhead er +Ġtra pped +ĠCent ury +Ġkn ocked +ĠOx ford +Ġsurviv ors +b ot +Ġdemon stration +Ġd irt +Ġass ists +OM E +ĠD raft +ortun ate +fol io +pe red +ust ers +g t +ĠL ock +Ġjud icial +ver ted +Ġsec ured +out ing +ĠBook s +Ġhost ing +Ġlif ted +l ength +Ġj er +Ġwhe els +ĠR ange +umbn ails +Ġdiagn osis +te ch +ĠStew art +ĠP ract +Ġnation wide +Ġde ar +Ġoblig ations +Ġgrow s +Ġmand atory +Ġsusp icious +! 
' +A pr +G reat +Ġmort gage +Ġprosecut or +Ġeditor ial +ĠK r +Ġprocess ed +ung le +Ġflex ibility +Ear lier +ĠC art +ĠS ug +Ġfoc uses +Ġstart up +Ġbre ach +ĠT ob +cy cle +ãĢ Į +ro se +Ġb izarre +ãĢ į +Ġveget ables +$ $ +Ġret reat +osh i +ĠSh op +ĠG round +ĠSt op +ĠHawai i +ĠA y +Per haps +ĠBe aut +uff er +enn a +Ġproduct ivity +F ixed +cont rol +Ġabs ent +ĠCamp aign +G reen +Ġident ifying +Ġreg ret +Ġpromot ed +ĠSe ven +Ġer u +ne ath +aug hed +ĠP in +ĠL iving +C ost +om atic +me ga +ĠN ig +oc y +Ġin box +Ġem pire +Ġhor izont +Ġbr anches +Ġmet aph +Act ive +ed i +ĠFil m +ĠS omething +Ġmod s +inc ial +ĠOrig inal +G en +Ġspir its +Ġear ning +H ist +Ġr iders +Ġsacr ific +M T +ĠV A +ĠS alt +Ġoccup ation +ĠM i +Ġdis g +lic t +Ġn it +Ġn odes +e em +ĠP ier +Ġhat red +ps y +ãĥ ī +Ġthe ater +Ġsophistic ated +Ġdef ended +Ġbes ides +Ġthorough ly +ĠMedic are +Ġbl amed +arent ly +Ġcry ing +F OR +pri v +Ġsing ing +ĠI l +Ġc ute +o ided +olit ical +ĠNe uro +å ¤ +Ġdon ation +ĠEag les +ĠG ive +T om +Ġsubstant ially +ĠLic ense +ĠJ a +Ġg rey +ĠAn imal +ĠE R +ĠU nd +Ġke en +Ġconclud e +ĠMississ ippi +Eng ine +ĠStud ios +P ress +o vers +ll ers +Ġ3 50 +ĠR angers +Ġr ou +ert o +E p +iss a +iv an +Ġse al +ĠReg ist +dis play +Ġwe aken +u um +ĠComm ons +ĠS ay +Ġcult ures +Ġl aughed +Ġsl ip +Ġtreat ments +iz able +m art +ĠR ice +Ġbe ast +Ġob esity +ĠLa ure +ig a +Wh ich +hold er +Ġelder ly +Ġp ays +Ġcompl ained +Ġc rop +Ġpro c +Ġexplos ive +ĠF an +ĠAr senal +A uthor +ef ul +Ġme als +Ġ( - +id ays +Ġimag ination +Ġann ually +Ġm s +as ures +H ead +ik h +m atic +Ġboy friend +ĠCom puter +Ġb ump +Ġsur ge +ĠCra ig +ĠKir k +D el +medi ate +Ġscen arios +ĠM ut +ĠSt ream +Ġcompet itors +Ù Ħ +ĠStan ford +ĠRes ources +az ed +b age +Ġorgan is +ĠRe lease +Ġsepar ately +Ġha bits +Ġmeasure ments +ĠCl ose +Ġaccomp any +Ġg ly +Ġt ang +ĠR ou +Ġplug in +Ġcon vey +ĠChall enge +oot s +j an +Ġcur s +ĠRel ations +ke eper +Ġapproach ing +p ing +Spe aking +Ġarrang ement +ĠV I +are ttes +Ġaffect ing +Ġperm its +b ecause +Ġu seless +ĠH us +!! !! +Ġdestro ying +Un fortunately +Ġfasc inating +S em +Ġelect oral +Ġtrans parency +ĠCh aos +Ġvolunte er +Ġstatist ical +Ġactiv ated +ro x +We b +H E +ĠHamp shire +is ive +M ap +Ġtr ash +ĠLaw rence +st ick +C r +Ġr ings +EX T +Ġoper ational +op es +D oes +ĠEv ans +Ġwitness ed +P ort +Ġlaunch ing +ec onom +w ear +ĠPart icip +um m +cul es +ĠR AM +ĠT un +Ġass ured +Ġb inary +Ġbet ray +Ġexpl oration +ĠF el +Ġad mission +it ated +S y +Ġav oided +ĠSim ulator +Ġcelebr ated +ĠElect ric +¥ ŀ +Ġcl uster +itzer land +he alth +L ine +ĠN ash +at on +Ġsp are +Ġenter prise +ĠD IS +clud es +Ġfl ights +Ġreg ards +ĠÃ Ĺ +h alf +Ġtr ucks +Ġcontact s +Ġunc ons +ĠCl imate +Ġimm ense +N EW +oc c +ect ive +Ġemb od +Ġpat rol +Ġbes ide +Ġv iable +Ġcre ep +Ġtrig gered +ver ning +Ġcompar able +q l +Ġg aining +ass es +Ġ( ); +ĠG rey +ĠM LS +s ized +Ġpros per +" ? 
+Ġpoll ing +Ġsh ar +ĠR C +Ġfire arm +or ient +Ġf ence +Ġvari ations +g iving +ĠP i +osp el +Ġpled ge +Ġc ure +Ġsp y +Ġviol ated +Ġr ushed +Ġstro ke +ĠBl og +sel s +ĠE c +,' ' +Ġp ale +ĠColl ins +ter ror +ĠCanad ians +Ġt une +Ġlabor atory +Ġn ons +t arian +Ġdis ability +ĠG am +Ġsing er +al g +ĠSen ior +Ġtrad ed +ĠWar rior +Ġinf ring +ĠFrank lin +Ġstr ain +ĠSwed ish +Ġsevent h +ĠB enn +ĠT ell +Ġsynd rome +Ġwond ered +id en +++ ++ +ig o +Ġpur ple +Ġjournal ism +Ġreb el +Ġf u +bl og +Ġinv ite +ren cies +ĠCont act +Is rael +ĠCont ent +Ġche er +Ġbed room +ĠEngine ering +ĠQue ens +Ġd well +ĠPlay Station +ĠD im +ĠCol on +l r +Ġoper ates +Ġmotiv ation +US A +ast ered +C ore +ĠTr uth +ol o +OS E +ĠMem ory +Ġpred ec +Ġan arch +Ġ19 20 +ĠY am +à ¨ +b id +Ġgr ateful +Ġexc itement +Ġtre asure +Ġlong est +ct ive +Ġdes erves +Ġreserv es +Ġcop s +ĠOtt awa +ĠEgypt ian +ank ed +Ġart if +Ġhypot hesis +: / +Ġpurch asing +Ġlove ly +H P +Ġdiv ide +Ġstrict ly +Ġquestion ing +Ġtaxp ayers +ĠJ oy +Ġroll s +ĠHe avy +Ġp orts +Ġmag netic +Ġinf lamm +Ġbr ush +t ics +â ĪĴ +Ġbott les +pp y +Ġp add +ãĤ ¯ +m illion +Ġdevast ating +Ġcomp iled +Ġmed ication +Ġtw elve +ĠPer ry +Sp ace +im b +y our +Ġle aked +ĠT ar +Ġun ity +Ġinfect ed +Ġtravel ed +ID E +ĠMc Donald +t xt +ĠPr inc +Ġinter ven +ĠTai wan +ĠP ow +Ġbe aring +ĠTh read +Ġz ones +iz ards +un ks +Ch apter +ll or +Ġ · +Ġw ounds +Ġdisc retion +Ġsucceed ed +ik ing +Ġicon ic +C all +Ġscreen ing +ĠM is +ict s +Ġmin isters +Ġsepar ation +Pl ayer +Ġb ip +Ġbel oved +Ġcount ing +ĠE ye +ar ound +ing ing +Ġtable t +Ġoff ence +in ance +h ave +ĠInf o +ĠNin ja +Ġprotect ive +ĠC ass +M ac +ĠQual ity +N orth +Ġ ic +ĠCub a +ĠChron icle +ĠPro perty +Ġfast est +ot os +ĠG erm +OW N +Ġbo om +ĠStan ley +ergus on +Ġcle ver +Ġent ers +m ode +ter ior +ĠS ens +Ġlin ear +AR K +Ġcomp aring +Ġpure ly +Ġsaf er +ĠPot ter +Ġc ups +R T +Ġgl uc +Ġatt ributed +Ġdu pl +ĠP ap +Ġprec ious +Ġp a +iction ary +ĠT ig +ĠTo o +ol utions +st an +Ġrob ots +Ġlob b +Ġstat ute +Ġprevent ion +w estern +16 0 +ĠAct ive +ĠMar ia +h al +N one +ell ar +ĠK B +ĠPart ners +ĠSing le +ĠFollow ing +ang o +ac ious +Ġth ou +Ġk g +Ġinflu ential +ĠFriend s +S ur +ain ted +Ġfor ums +Ġst arter +Ġcitizens hip +ĠE lection +on ge +ot ation +os ph +;; ;; +ut ical +p ur +ere n +Ġaccus ations +bit ious +ab bit +ĠOr d +Post ed +ir k +Ġsens itivity +ic he +ĠAm y +ĠF ab +Ġsum mit +Ġped est +Ġrub ber +Ġagric ultural +Ġcan cel +A E +Ġin aug +Ġcont am +Ġfirm ly +i w +st age +ĠK an +Ġt ier +Ġinv ention +Ġtransl ated +ĠR ules +B ox +Tw itter +ID S +Ġp izza +Ġdeb ug +ĠD rop +v s +Ġh orses +b ig +Ġb oring +Ġh ood +ĠMcC ain +at ched +ĠBro s +Ġsk ip +Ġess ay +st at +ĠLeg ends +Ġam munition +au c +Ġshoot er +Ġun h +Ġsuppl ied +Ġgener ic +ĠS K +ib an +yr ics +Ġ25 5 +Ġclim bing +Form er +Ġfl ip +Ġjump ing +Ġfrust ration +ĠTer ry +Ġneighborhood s +Ġmed ian +be an +Ġbr ains +Follow ing +Ġsh aped +Ġdraw s +Ġal tered +J ack +Ġrecip es +Ġsk illed +we alth +ach i +e lection +Ġbehavi ors +de als +ĠU ntil +F e +Ġdecl aration +mar ks +ĠBet ween +cel ona +Ġres on +Ġbub ble +Am ong +Ġim perial +G S +Ġfemin ist +200 5 +ĠK yle +Ġaccount ing +ĠTe le +ĠT yr +Ġconnect ing +Ġre hab +ĠP red +s im +Ġmeant ime +Ġphys ician +M W +ĠCamp bell +ĠBr andon +Ġcontribut ing +ĠR ule +ĠWe ight +ĠN ap +Ġinter active +Ġv ag +Ġhel met +ĠCom b +f our +Ġsh ipped +Ġcomple ting +ĠP D +PD ATE +Ġspread ing +Ġsc ary +erv ing +ĠG as +Ġfr ank +s chool +Ġrom antic +Ġstab il +R ob +Ġaccur ately +Ġac ute +ĠH ann +Ġsymbol s +Ġcivil ization +ĠA W +Ġlight ning +Ġcons iders +Ġven ue +Ġ × +Ġo ven +ĠS F +h 
is +Ġn u +ĠLear n +Ġpe oples +Ġst d +Ġsle e +Ġs lic +ĠStat istics +Ġcor ners +ĠB aker +Ġ: ) +ment ation +ol ver +Ġlaugh ing +ĠT odd +ond e +ĠH ills +Ġn uts +ĠW oman +pl ane +Ġl iver +ĠIn side +S orry +Ġagre es +Ġfund ament +ĠF isher +Ġa uction +Ġthread s +gl as +ĠBas ic +ĠN at +Ġlack ing +Ġceleb ration +j u +Ġs illy +E uro +Ġt att +ight y +cont rolled +T est +ĠSing h +Ġr age +Ġrh yth +o ffic +ĠPh antom +Ġhead lines +Ġrespond ing +ĠMor ning +Ġvit amin +Ġboot s +ĠS ite +al in +p i +Ġvir al +ĠU C +D ER +ĠSe x +Ġst ocks +c urrent +Ġch urches +ĠR are +ĠMur phy +Ġden ial +ĠG aming +Ġtou g +Ġn ick +Ġm akers +ĠRon ald +Ġgener ous +ĠD oc +ĠMor ris +Ġtransform ed +ĠN ormal +Ġ10 4 +ĠKick starter +ĠUp on +On line +ĠI RS +Ġw rap +Ġl oving +Ġarri ves +ĠD ue +Ġhe ter +ĠM ade +Ġrent al +Ġbelong s +Ġatt orneys +Ġcro ps +Ġmat ched +ul um +ol ine +10 9 +Ġdis par +Ġbuy ers +ĠCam bridge +Ġeth ics +rou ps +Ġjust ified +Ġmarg inal +Ġrespect ed +win ning +Ġnodd ed +ĠSer ge +ĠForm er +C raft +######## ######## +ĠWar ner +Ġd ash +et e +Ġent ert +ĠE scape +out heast +Ġkn ees +ĠB omb +Ġr ug +P ass +Ġatt itudes +go vernment +ĠPri or +Ġqual ities +Ġnot ification +ĠPh one +l ie +Ġanticip ated +ĠCom bat +ĠBar ry +Ġ198 2 +Us ers +on er +Ġcomput ing +ĠConnect icut +Ġless er +Ġpe ers +ĠC u +Ġtechn ically +Ġsub mission +ĠUn iversal +Ġman ually +our ge +Ġrespond ents +ĠB TC +ĠH ost +Ġf are +ĠB ird +Ġrece ipt +al so +Ġj ack +Ġagric ulture +Ġsk ull +Ġ! = +Ġpass ive +ĠC I +Ġsoc ieties +Ġremind ed +Ġinter ference +B uy +Ġâ ľ +g on +Ġscrut iny +ĠW itch +Ġconduct ing +Ġ ãĥ +Ġexch anges +ĠMit chell +Ġinhab it +Ġtw ist +B D +Ġwhere ver +group on +Ġj okes +ĠBen jamin +ĠR andom +fr ame +ĠL ions +Ġhighlight ed +ĠArk ansas +E nt +Ġp ile +Ġpre lim +g s +mind ed +Ġfel ony +ĠG A +ĠL uck +Ġpract ically +ĠB os +Ġact ress +D am +ĠB ou +Ġvis a +Ġembed ded +Ġhy brid +Ġear liest +Ġsoon er +s ocial +ĠH A +Ġste ep +Ġdis advant +Ġexplo it +ĠE gg +ĠUlt ra +Ġnecess ity +L ocal +ie ge +Ġd ated +Ġmass es +Ġsubsc ription +pl ess +Ġan onym +Ġpresum ably +Bl ue +The ir +asket ball +ĠPhil ip +Ġcom ed +load ed +r ane +Ġref lection +Ch ina +Ġext ends +Ġform ing +Ġund ers +200 1 +Ġgr at +Ġconcent rations +Ġins ulin +Ġsec ular +Ġwh ilst +Ġwin ners +Ad vertisements +Ġdeliber ately +ĠWork ing +Ġs ink +et ics +d ale +Ġmand ate +Ġg ram +Ġvac ation +Ġwarn ings +ri pp +ĠTH AT +Ġcomment ary +Ġint u +Ġa est +Ġreason ing +Ġbreak down +ĠZ ombie +Ġ-- > +ĠPolit ical +c ott +Ġthr ust +Ġtechn ological +Ġdec iding +Ġtraff icking +L ong +W elcome +pr ising +ĠCommun ications +Ġend ors +Ġsw ift +Ġmetab ol +co ins +res a +ĠHT TP +Ġen roll +ĠH appy +us r +int age +Ġ[ " +u ably +ĠM aterial +Ġrepe al +Se pt +k h +ĠMod i +Ġunder neath +ĠI L +sh ore +Ġdiagn osed +ace utical +Ġsh ower +au x +ĠSw itch +ĠStre ngth +Ġj ihad +n ational +Ġtra uma +uss y +on i +Ġcons olid +Ġcal ories +ĠF lynn +ag ged +16 8 +ĠP ink +Ġfulf ill +Ġch ains +Ġnot ably +ĠA V +L ife +ĠCh uck +m us +ĠUr ban +ĠH end +Ġdep osit +ĠS ad +Ġaff air +OR K +ie val +ĠF DA +Ġt rop +ĠOver all +Ġvirt ue +Ġsatisf action +au nd +Ġl un +ĠSw itzerland +ĠOper ation +pro cess +Ġsh ook +Ġcount ies +le ased +ĠCharl otte +1 12 +Ġtrans cript +Ġre dd +p ush +ĠHe y +ĠAn alysis +[ " +Ġaltern atives +ard less +Ġele ph +Ġpre jud +ĠLe af +H aving +ĠH ub +Ġexpress ions +ĠVol ume +Ġshock ing +ĠRed s +Ġread ily +Ġplan ets +ad ata +Ġcollaps ed +ĠMad rid +Ġir rit +i pper +ĠEn c +ĠW ire +Ġbu zz +ĠG P +ash a +Ġaccident ally +ur u +Ġfrust rated +ĠS A +Ġhung ry +ĠH uff +Ġlab els +ant o +ĠE P +Ġbar riers +) | +ĠBer keley +ĠJ ets +Ġp airs +ĠL an +J ames 
+ĠB ear +Ġhum or +ĠLiber ty +Ġmagn itude +Ġag ing +ĠM ason +Ġfriends hip +umb ling +Ġemer ge +Ġnewsp apers +Ġam bitious +ĠRich ards +atern al +Ġ198 1 +Ġcook ies +Ġsc ulpt +Ġpur suit +L ocation +Ġscript s +p c +Ġarrang ements +Ġd iameter +Ġl oses +am ation +Ġl iqu +ĠJ ake +aret te +Ġunderstand s +ĠZ en +v m +Ġappro ve +Ġw ip +Ġult ra +Ġint end +ĠD I +asc ular +Ġst ays +ĠK or +ĠK l +Ġinvest ing +L a +Ġbelie ving +b ad +m outh +Ġtaxp ayer +ãĥ ĥ +ĠQue bec +Ġl ap +ĠSw iss +d rop +Ġdr ain +ir i +et c +ft en +ĠN ex +Ġst raw +Ġscream ing +Ġcount ed +Ġdam aging +Ġamb assador +cent ury +Ġpro x +Ġarrest s +u v +il ateral +ĠCh arg +Ġpresc ribed +Ġindepend ently +Ġf ierce +ĠB aby +Ġb rave +Ġsu its += > +Ġbas eline +ĠR ate +Ġis lands +Ġ( ( +g reen +ix els +Ġname ly +ĠVill age +th an +am y +V ersion +g mail +ential s +ĠS ud +ĠMel bourne +Ġarri ving +Ġquant um +e ff +rop olitan +T ri +Ġfun eral +ĠI R +ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ +ĠC ob +it ably +Ġt urb +Ġcomb o +Re view +Ġdeploy ment +u ity +ĠB ott +Ġinv isible +Ġrender ing +Ġunl ocked +Ġa qu +ĠVlad imir +Ġp ad +ĠBr ain +ĠLeg acy +dr agon +ĠKurd ish +Ġsound ed +Ġdet ained +ĠD M +g ary +Ġd aughters +Ġdistur bing +uk a +ĠPar ad +Ġt ast +Ġunf ortunate +Ġu l +em in +Ġattend ance +tr l +Ġpar ks +ĠMem orial +ĠAl ice +oth y +gu ard +ĠD ise +ĠSh an +ĠFor um +R ich +Ġshif ted +ue z +Ġl ighter +ĠMag n +Ġc od +S ch +ham mad +P ub +3 50 +ĠP okemon +Ġprot otype +Ġun re +B ase +ĠStud ents +ĠRep ly +ĠCommun ist +Ġg au +ĠTy ler +I Z +Ġparticip ated +Ġsup rem +ĠDet ails +Ġvessel s +ro d +Ġt ribe +ke ep +Ġassum ptions +Ġp ound +Ġcr ude +ĠAv ailable +Ġswim ming +Ġin clusion +Ġadv ances +c ulation +Ġconserv ation +Ġover d +ĠBuff alo +Art icle +ed ge +Ġaw a +ĠMad ison +Ġsid ew +Ġcat ast +ĠK rist +uc le +ĠHigh way +ĠTer ror +Ġactiv ation +Ġuncons cious +ĠSat an +ĠSus an +ill ery +Ġarr anged +i op +Ġrum ors +ur ring +th ink +ĠKe ith +ĠK ind +Ġavoid ing +by n +n ut +ĠSpe aker +r us +n ames +Ġgu ilt +ĠOlymp ics +Ġsa il +ĠM es +lev ant +ĠColumb us +a ft +C ity +S outh +ĠHar vey +ĠP un +S everal +Ġment ally +Ġimp ress +m ount +ĠUb untu +âĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶ +ĠSuper man +ĠMP s +Ġintent ions +ĠR acing +Ġlike lihood +Ġ2 40 +T otal +Ġto ys +ĠW atson +Ġur ge +L ear +ĠP aper +Ġoccur ring +ĠB eng +ĠC ert +Ġst ones +T im +ĠTw in +z b +ĠD ynam +Ġpolit ician +k ens +ĠEnter prise +UT ERS +Ġab ol +Ġref resh +Ġarbit rary +pe ction +Ġtrou bles +Ġ} ); +t v +Ġpil ots +Ġdist ribute +Ġaud it +Ġp ause +orig inal +Ġr ivals + £ +F ig +T L +ab il +ry ing +L in +ion ed +l on +Ġf ancy +Ġcr ashed +Ġt ract +Ġshe d +Ġcons ume +B ased +down load +in it +Ġvolt age +Int rodu +Ġcondem ned +ĠFin ance +res pect +Ġex cluded +Ġestablish ing +her ic +Ġher itage +Ġspect acular +Ġun st +ĠSnow den +ĠL ane +S an +Ġprotect ions +st ruction +inc inn +Ġmac ro +C ustom +ios ity +Ġes p +Ġfunction ing +Ġm ush +Ġp uzzle +Ġeth ical +M al +Ġgo verning +ĠF erguson +Ġrest ored +Ġst ressed +ĠCoun ter +ĠK as +cl ip +AN S +Ġse iz +U K +by ss +old own +ap i +Ġperman ently +oun ters +W est +Th rough +L ight +at oes +Ġne at +Ġc ord +ure r +Ġsevere ly +ĠA ven +Ġinter rog +Ġtri ple +G iven +N umber +Ġar ise +Ġs her +pl ant +Ġfl ower +ĠC ou +Ġat e +Ġnew er +b ul +Ġmean while +ĠL air +Ġadjust ment +ĠCop yright +Ġd ivers +i ological +Ġgam ers +o at +Ġhistor ically +Ġanal og +Ġlong time +Ġpres cription +ĠM ist +ĠHy per +ĠM aine +ĠDe ity +Ġmulti pl +ĠRe incarn +ĠH yd +ĠP ic +S il +r ants +ĠC ris +. 
; +( { +epend ence +Ġrec y +ate ur +Ġqu ad +Ġgl ob +Ġcon ced +te am +Ġcapital ist +ĠL ot +Ġroy al +ĠCy ber +Ġblack s +met ic +ri v +ĠD anny +Ġsp o +ĠR O +Ġanim ated +rypt ed +ĠDep uty +Ġrend ered +F E +Ġstre ak +Ġcloud s +ĠDou g +~~~~ ~~~~ +Ġdisc our +ĠVe h +Ġpsych ology +ĠJ ourney +Ġcry stal +ĠFro st +Ġsuspic ion +Ġrel ate +or us +ĠC rypt +ĠN VIDIA +com ed +ut ing +incinn ati +Ġvulner ability +ost ic +Ġisol ation +Ġcool ing +ĠCoal ition +Ġ1 19 +F our +ĠDe al +Ġâ ī +se mble +ram ent +ĠBar celona +Ġ10 2 +Ġcoc aine +ocaly pse +F eb +ogen ic +Ġmut ation +Ġcrypt oc +ĠK el +ĠG it +a is +Ġs isters +AN K +Ġactiv ate +T er +Ġd read +yl on +Ġprop ri +A ust +ĠDef ault +Ġout door +Ġshe er +ce ive +Ġg ently +Ð ¾ +Pro gram +Ġâ ĨĴ +Ġve gan +ĠCr us +Ġrespons ibilities +ĠH R +OL D +Ġprev ents +Ġst iff +ĠW ere +Ġathlet ic +ĠSc ore +Ġ) : +Ġcolumn s +ĠL oc +av ailable +ĠF ram +ĠS essions +Ġcompan ion +Ġpack s +14 0 +ĠKn ights +Ġf art +Ġstream s +Ġsh ore +Ġapp eals +ĠPer formance +h aul +ĠSt ra +ĠN ag +10 3 +ĠTrans portation +B B +E v +z an +P ublic +Ġtw in +uls ion +M ult +Ġelect ro +Ġstat ue +ation ally +ĠN ort +Ġins pection +/ * +ig ue +Ġcomp assion +ĠT ales +ĠSte in +ĠSc reen +ĠB ug +ĠL ion +g irl +Ġwithdraw al +Ġobject ives +Ġblood y +Ġprelim inary +Ġj acket +Ġdim ensions +ĠC ool +ĠOcc up +Ġw reck +Ġdoub led +ank ing +Ġ19 75 +Ġglass es +ĠW ang +pro v +P ath +connect ed +ĠMult i +ĠNor way +agon ist +Ġfe ared +Ġtouch ing +Ġarg uably +¯¯¯¯ ¯¯¯¯ +ĠNC AA +che m +Ġsp at +ĠW WE +ĠC el +ig ger +Ġattack er +ĠJo in +ob ject +ett a +Ġelim inated +d et +Ġdest ruct +ĠLuc as +ct uary +18 0 +ĠBr ady +ĠBl ues +B ay +au kee +Ġtim eline +Ġdeleg ates +w ritten +uff icient +Ġsh apes +Cop yright +ou ble +serv ice +Ġp ione +Ġcolleg es +Ġrow s +Ġsp ite +Ġassess ed +3 60 +Ġle ase +Ġconfident ial +ck er +ĠMan ning +ĠV oice +Ġse aled +Ġcalcul ate +N O +ĠAss istant +Ġteen ager +ul ent +ather ine +Ġm ock +Ġd iamond +Ġf est +Ġsw itched +Ġres ume +ĠPu erto +Ġl anes +ir ation +ĠSimilar ly +Ġro d +ĠS el +ĠPal ace +ĠLim ited +e ous +Ġvar iant +Ġw ard +Ġ) ) +Sh ow +OO K +A lex +ĠN ep +br is +ĠWik ipedia +Ġexcept ional +Ġman ages +ĠD raw +Ag ain +Ġco pper +ut t +Ġex ports +Ġport folio +Ġelev ated +R ated +ĠOther wise +ĠT act +ĠShe l +ĠT X +" âĢĶ +Ġres ur +ĠW a +ven ant +Ġmon etary +pe ople +E mail +Ġfif ty +ĠS weet +ĠMalays ia +Ġconf using +ĠR io +ud a +uten ant +" ); +Ġpra ised +Ġvol umes +t urn +Ġm ature +Ġnon profit +Ġpassion ate +ĠPriv ate +Ġ10 3 +Ġdesc end +ç ¥ŀ +uff y +head ed +Whe ther +ri en +ze ch +be it +Ġch rom +ĠMc M +Ġd ancing +Ġe leg +ĠNot iced +11 5 +Ġadvoc acy +ENT S +amb ling +ĠMin or +ĠF inn +Ġprior ities +Ġthere of +ĠSt age +ĠRog ers +Ġsubst itute +ĠJ ar +ĠJeff erson +Ġlight ly +10 2 +ĠL isa +u its +ys ical +Ġshif ts +Ġd rones +Ġwork place +Ġres id +ens ed +ah n +Ġpref erences +ser ver +Ġdeb ates +d oc +ĠGod s +Ġhelicop ter +Ġhon our +Ġconsider ably +ed ed +ĠF emale +ĠAn ne +Ġre un +ĠF ace +ĠHall ow +ĠBud get +Ġcondem n +Ġt ender +Pro f +ocr atic +ĠTurn er +ĠAg ric +Ġ19 76 +Ġa pt +d isc +ĠF ighter +ĠA ur +Ġgar bage +in put +ĠK arl +ĠOl iver +ĠL anguage +k n +N on +ĠCl ar +Ġtrad itions +Ġad vertisement +ĠS or +Ġarch ive +Ġvill ages +7 50 +Ġimplement ing +w aukee +Ġdiet ary +Ġswitch ing +Rep ublic +Ġvel ocity +Ġc it +ĠA wards +Ġfin ancing +Ġlast ed +) ] +Ġrem inder +P erson +Ġprec ision +Ġdesign ers +ĠF ried +ĠB order +Ġtr agic +Ġw ield +Ġiniti atives +ĠT ank +w er +Ġjo ins +R o +in ery +Ġar row +Ġgener ating +found er +Ġsear ches +Ġrandom ly +A ccess +Ġb atch +Ġp osed +l at +Ġpursu ing +as a +Ġtest ified +form ing +ĠSh 
ar +w iki +ĠE ither +S ometimes +Ġsen ators +ĠJohn ny +ĠTal iban +ĠG PS +":" / +ãģ® å +Ġanaly zed +ĠRub io +ĠMove ment +op ard +ii i +St and +f ight +Ġign oring +i ang +ĠG N +so ever +ĠST AT +Ġref using +Ġswe at +Ġb ay +P ORT +ir med +ak y +Ġdis pro +Ġlabel ed +Ġ10 8 +H ello +Ġple asant +ab a +Ġtri umph +Ġab oard +Ġinc om +ĠC row +le tt +Ġfol k +Ġch ase +` ` +ĠBr us +Ġte ens +c ue +Ġter rain +h yd +il ight +OR Y +Su pport +ew s +ll i +rain ts +ĠC and +Ġab used +ach ment +l arg +B as +ĠC ancer +Ġ19 78 +Ġsupp orter +ac cess +ĠTer min +ĠT ampa +ĠAN Y +Ġnew est +ĠCrim inal +ed u +Ġ19 30 +Ġadm its +Ġend e +Ġfail ures +ur ate +ful ness +cy cl +ĠSub ject +Ġinf inite +th ree +W A +p it +ĠInst all +R ad +ili ation +G M +Ġcontin ent +Ġaccommod ate +ĠCl ay +Ġp up +ĠF unction +Ġham mer +ĠAlbert a +Ġrev ised +Ġminor ities +Ġmeasure ment +Con nell +Ġdis able +ĠM ix +In cre +Ġfor k +ĠR osen +Ġimpl ies +umb lr +AN G +Ġprote ins +Ġagg ression +Ġfacilit ate +S N +Ġilleg ally +u er +Ġacad em +Ġp uzz +ĠSh ift +p ay +oll o +Ġaud iences +B uild +Ġno ble +Ġsynt ax +â ĺħ +Ġbe am +ĠB ed +ĠA ld +Ġorig ins +v ideo +Ġ19 77 +ĠAss ault +Ġgar age +Te am +Ġver dict +Ġd war +ĠVirt ual +e vent +Ke ep +Ġsent iment +Ġwild life +sh irt +Ġb urg +Ġrecommend ation +rep resent +Ġgall ery +own ers +Ġsch olar +Ġconven ience +ĠSw ift +Ġconv inc +C ap +Ġwar fare +ĠVis ual +Ġconst itute +Ġab ort +ĠWe ather +ĠLook ing +ĠH em +Ġmart ial +Ġinc oming +et ition +Ġtoler ance +ĠCre ated +Ġfl ows +ĠE lder +Ġsoul s +Ġf oul +ĠP ain +ĠC AN +Ġ2 20 +b c +he nd +Ġgen ius +R eal +ĠW r +omet er +p ad +Ġlim iting +ĠS i +ĠL ore +ĠAd ventures +Ġvar ied +D isc +f in +ĠPerson al +Ch ris +Ġinv ented +Ġd ive +ĠR ise +Ġo z +ĠCom ics +Ġexp ose +ĠRe b +let ters +s ite +im ated +Ġh acking +Ġeduc ated +ĠNob ody +Ġdep ri +Ġincent ive +ãĤ · +Ġovers ight +Ġtrib es +ĠBelg ium +Ġlicens ing +our t +Produ ct +ah l +ĠG em +Ġspecial ist +Ġc ra +ann ers +ĠCor byn +Ġ19 73 +RE AD +Ġsum mar +Ġover look +ĠApp lication +Ġin appropriate +Ġdownload ed +Q ue +ĠB ears +Ġth umb +ĠChar acter +ĠReincarn ated +ĠS id +Ġdemonstr ates +s ky +ĠBloom berg +ĠAr ray +ĠRes ults +ĠFour th +ĠED T +ĠO scar +c end +Ġ10 6 +ĠN ULL +ĠH ERE +m atch +ĠBr un +Ġgluc ose +ie g +eg u +Ġcert ified +Ġrel ie +Ġhuman itarian +Ġpr ayers +K ing +Ġn an +h ou +10 8 +ul u +Ġrenew able +Ġdistingu ish +Ġd ense +ĠV ent +ĠPack age +ĠB oss +Ġedit ors +Ġm igr +T ra +ĠPet ers +ĠAr ctic +200 4 +ĠC ape +Ġloc ally +Ġlast ing +Ġhand y +. ). 
+P an +ĠR ES +Ind ex +Ġt ensions +Ġformer ly +Ġide ological +Ġsens ors +Ġdeal ers +Ġdef ines +S k +Ġproceed s +Ġpro xy +az ines +ĠB ash +ĠP ad +ĠC raft +eal ous +Ġshe ets +omet ry +J une +cl ock +T T +ĠThe atre +ĠB uzz +Ġch apters +Ġmill enn +Ġd ough +ĠCongress ional +Ġimag ined +av ior +Ġclin ic +Ġ19 45 +Ġhold er +ro ot +oles ter +Ġrest art +B N +ĠHam as +ĠJ ob +Ġor b +Ġr am +Ġdiscl ose +Ġtransl ate +Ġimm igrant +Ġannoy ing +Ġtreat y +an ium +ĠTe a +ĠLeg ion +Ġcrowd s +ĠB ec +ĠA er +oh yd +B ro +Look ing +Ġl bs +Ġagg ress +Ġse am +Ġinter cept +ĠM I +mer cial +act iv +ĠC it +Ġdim ension +Ġconsist ency +Ġr ushing +ĠDou glas +Ġtr im +Inst all +ick er +Ġsh y +10 6 +Ġment ions +pe lled +ĠT ak +c ost +Ġclass room +Ġfort une +dri ven +Ġun le +ĠWhe el +Ġinvest or +ĠM asters +k it +Ġassoci ations +ĠEv olution +op ing +us cript +Ġprov incial +ĠWal ter +av i +S O +Ġun limited +Eng lish +ĠC ards +ĠEb ola +ne red +Ġreven ge +Ġout right +um per +Ġf itting +ĠSol id +Ġform ally +Ġproblem atic +Ġhaz ard +Ġenc ryption +Ġstraight forward +ĠA K +Ġp se +ĠOr b +ĠCh amber +ĠM ak +Cont ents +Ġloyal ty +Ġl yrics +ĠSy m +Ġwel comed +Ġcook ed +Ġmon op +Ġn urse +Ġmis leading +Ġe ternal +Ġshif ting +Ġ+ = +V is +Ġinst itutional +ill ary +Ġp ant +VER T +ĠA CC +ĠEn h +Ġinc on +ĠRE UTERS +Ġdon ated +âĢ¦âĢ¦ âĢ¦âĢ¦ +In tern +Ġexhib it +Ġt ire +ĠR ic +ĠCh ampion +ĠMu hammad +N ING +ĠSoc cer +Ġmob ility +Ġvary ing +ĠM ovie +Ġl ord +o ak +F ield +Ġve ctor +us ions +Ġsc rap +Ġen abling +m ake +T or +. * +| | +ĠWe bsite +ĠN PC +Ġsocial ist +ĠBill y +ĠAdd itional +Ġc argo +Ġfar ms +ĠSo on +ĠPri ze +Ġmid night +Ġ9 00 +se en +ĠSp ot +Ġshe ep +Ġspons ored +ĠH i +ĠJ ump +Ġ19 67 +Micro soft +ĠAg ent +Ġch arts +d ir +Ġadj acent +Ġtr icks +Ġman ga +Ġex agger +/ > +foot ball +ĠF CC +G C +ĠT ier +and ra +OU ND +% ), +Ġfru its +V C +ĠA A +R ober +Ġmid st +â Ĺ +ank a +Ġlegisl ature +ĠNe il +Ġtour ists +" " +ĠWar ning +ĠNever theless +ĠOffic ial +ĠWh atever +Ġm old +Ġdraft ed +Ġsubst ances +Ġbre ed +Ġt ags +ĠT ask +Ġver b +Ġmanufact ured +com ments +ĠPol ish +Pro v +Ġdetermin es +Ob ama +k ers +Ġutter ly +Ġse ct +sc he +ĠG ates +ĠCh ap +Ġal uminum +Ġz ombie +ĠT ouch +ĠU P +Ġsatisf y +Ġpred omin +asc ript +Ġelabor ate +Ġ19 68 +Ġmeas uring +ĠV ari +any ahu +Ġs ir +ul ates +id ges +ick ets +ĠSp encer +T M +oub ted +Ġpre y +Ġinstall ing +ĠC ab +re ed +re ated +Su pp +Ġwr ist +ĠK erry +10 7 +ĠK le +ĠR achel +Ġc otton +ĠA RE +ĠE le +Cont rol +Ġload s +ĠD od +an as +b one +Ġclass ical +ĠReg ional +ĠInt eg +V M +Ġdes ires +Ġaut ism +support ed +ĠM essage +Ġcomp act +writ er +Ġ10 9 +ĠHur ricane +c ision +Ġcy cles +Ġdr ill +Ġcolle ague +Ġm aker +G erman +Ġmist aken +S un +ĠG ay +Ġwhat soever +Ġsell s +ĠA irl +l iv +ĠO ption +Ġsol ved +Ġse ctors +Ġhorizont al +Ġequ ation +ĠSk ill +ĠB io +g ement +ĠSn ap +ĠLeg al +Ġtradem ark +Ġmake up +Ġassemb led +Ġsa ves +ĠHallow een +ĠVer mont +ĠFR OM +Ġfar ming +ĠP odcast +accept able +ĠHig her +Ġas leep +ull ivan +Ġrefere n +ĠLe v +Ġbul lets +ok o +H C +Ġst airs +Ġmain tains +ĠL ower +ĠV i +Ġmar ine +Ġac res +Ġcoordin ator +ĠJ oh +Ġcounterpart s +ĠBrother s +Ġind ict +b ra +Ġch unk +Ġc ents +H ome +ĠMon th +Ġaccording ly +if les +ĠGerm ans +ĠSy n +H ub +Ġey eb +âĶĢâĶĢ âĶĢâĶĢ +Ġr anges +ĠHoll and +ĠRob ot +f c +M ike +Ġpl asma +Ġsw ap +Ġath lete +ĠR ams +,' " +Ġinfect ions +Ġcor rid +Ġv ib +Ġpat ches +Ġtradition ally +Ġrevel ation +Ġswe ep +Ġgl ance +Ġin ex +200 3 +ĠR aw +work ing +os ures +ĠD at +ĠLyn ch +Ġle verage +ĠRe id +Ġcorrel ation +ian ces +av ascript +Ġrep ository +ret ty +Ġ19 72 +24 0 +Ġo un +p ol +ĠRe 
ed +Ġtact ical +is ite +App le +ĠQu inn +Ġrap ed +ill o +Euro pe +Ġalgorith ms +ĠRod rig +i u +Ġill um +Ġf ame +Ġintrodu cing +Ġdel ays +ĠRaid ers +Ġwh istle +Ġnovel s +ĠRe ally +Ġder iv +Ġpublic ations +ĠNe ither +ĠCom merce +Ġa ston +l anguage +Not es +ĠR oth +ĠF ear +Ġm ate +Ġpar ade +ĠQ B +Ġman eu +ĠC incinnati +m itting +Ġwa ist +ĠR ew +Ġdisc ont +Ð ° +Ġst aring +Ġal ias +Ġsec urities +Ġtoile t +ĠJ edi +Ġun law +v ised +//// //// +] ( +ĠWe iss +Ġpre st +ĠComp an +Ġmem o +ĠGr ace +J uly +ĠEl ite +cent er +ĠSt ay +Ġgal axy +Ġto oth +ĠS ettings +Ġsubject ed +ãĤ ¦ +Ġline back +Ġretail ers +ĠW ant +Ġd angers +A ir +Ġvolunt ary +ew ay +Ġinterpret ed +ot ine +à § +Ġp el +Serv ice +ĠEvent ually +Ġcare ers +Ġthreat en +Ġmem or +ĠBrad ley +anc ies +s n +ĠUn known +N ational +Ġsh adows +ail and +ĠD ash +Every one +izz ard +M arch += ( +Ġpull s +Ġstr anger +Ġback wards +ĠBern ard +imens ional +Ġch ron +Ġtheoret ical +k top +Ġw are +ĠInvest ig +ĠIn iti +ĠOper ations +o ven +oc ide +* / +Ġfl ames +ĠC ash +sh it +Ġc ab +ĠAn aly +ĠSe ah +Ġdefin ing +Ġorder ing +Ġimm un +Ġpers istent +AC H +Russ ian +m ans +Ġh ind +Ġphot ography + © +Ġh ug +Ġ10 7 +ĠH ence +i ots +ude au +Ġsubsid ies +Ġroutine ly +ĠDev ice +it ic +Ġdisg ust +land er +Ġ19 40 +Ġassign ment +ĠB esides +w ick +ĠD ust +us c +struct ed +11 1 +de velop +Ġf ond +Ġinter section +Ġdign ity +Ġcommission er +With out +re ach +Ġcart oon +Ġsc ales +ãĥ Ń +F IG +Ġsurve ys +ĠIndones ia +Ġart work +Ġun ch +Ġcy cling +un ct +au er +or ate +ĠOb viously +Ġcharacter ized +fe ld +Ġaff irm +Ġinn ings +Ġ é +Ġal iens +Ġcl oth +et ooth +ĠC ertain + § +Ġdig est +k now +ĠX L +Ġpredict ions +Ġd in +W AR +Ġafter math +Ex ample +ĠSu ccess +ĠTh r +IG N +Ġmin er +B us +Ġcl arity +heim er +ĠO UT +ĠS end +ĠCirc le +ĠD iet +Ġpron ounced +Ġcreat ors +Ġearthqu ake +atter y +ge ons +Ġo d +Ġlay ing +or p +U lt +pro ject +Ġunder min +Ġsequ el +S am +ĠDark ness +Ġre ception +b ull +Y S +ĠV ir +Ġsequ ences +ĠCo in +Ġout fit +ĠW ait +1 19 +Ġdel ivers +.... .. 
+Ġbl own +ĠE sc +ĠM ath +per m +ĠU l +Ġgl im +Ġfac ial +Ġgreen house +Ġto kens +/ - +ĠAnn ual +ĠON E +Ġteen age +ĠPhys ical +ĠL ang +ĠC elt +Ġsu ed +ivid ually +Ġpat ience +ch air +reg ular +Ġa ug +in v +ex cept +ĠL il +Ġn est +f d +s um +ĠCh ase +Russ ia +ĠJenn ifer +Ġoff season +Over all +F ore +Ġr iot +A ud +form er +Ġdefend ers +ĠC T +iot ic +rib ly +Ġautom ated +Ġpen is +Ġins ist +Ġdi agram +ĠS QL +ĠG arc +Ġw itch +cl ient +ier ra +am bers +Ġrec ount +f ar +V ery +oster one +Ġappreci ated +ĠPer fect +S ection +Ġd oses +oca ust +Ġcost ly +Ġg rams +ĠSh i +Ġwrest ling +Ġ19 71 +Ġtro phy +Ġn erve +ĠK az +ĠExper ience +Ġpled ged +Ġplay back +Ġcreat ivity +by e +Ġattack ers +Ġhold ers +ĠCo ach +ĠPh D +Ġtransf ers +Ġcol ored +ĠH indu +Ġd rown +Ġlist ened +ĠW A +ias m +P O +Ġappeal ing +Ġdiscl osed +ĠCh icken +ag ging +Ġple aded +Ġnav igation +ĠReturn s +Ġ[ [ +R OR +E A +Ġphotograp her +ĠR ider +ipp ers +Ġsl ice +Ġe rect +Ġhe d +iss ance +ĠVik ings +ur ious +Ġapp et +oubted ly +Ch ild +Ġauthent ic +o os +ĠM aking +Ġannoun cing +Ġb od +Ġmet er +ĠN ine +ĠR ogue +Ġwork force +Ġrenew ed +Ġorganis ations +ac s +P LE +Sh ort +Ġcomp ounds +ĠVis it +Ġen velop +ear th +Ġsupport ive +gg le +ĠBrus sels +ĠGu ild +Cre ate +RE L +Ġaver aged +Ġ19 69 +ri ages +Ġlength y +Ġforg ot +O kay +ĠE rd +Ġdeal er +Ġrec ession +D D +Ġdesper ately +Ġhun ger +Ġst icks +Ġm ph +ĠF aith +Ġintention ally +Ġdem ol +ue ller +ĠS ale +Ġde bris +s pring +Ġle ap +>> >> +Ġcontain ers +se lling +rane an +atter ing +Ġcomment ed +ĠC M +on ut +Ġwood s +es pecially +Ġorgan ize +iv ic +ĠWood s +ang a +s qu +Ġm aj +am on +Ġax is +Ġ19 74 +ĠDen mark +Ġwar rior +ĠP and +Ġout lined +ĠB O +ins ula +z illa +eb ook +Ġd are +Ġsear ched +Ġnav igate +S n +writ ing +Ġun ited +J apan +ĠHe brew +Ġfl ame +Ġrel ies +Ġcatch ing +ĠSh o +Ġimprison ment +Ġp ockets +Ġclos ure +ĠF am +t im +ade qu +Act ivity +Ġrecru iting +ĠW ATCH +ĠArgent ina +d est +Ġapolog ize +or o +Ġlack s +Ġtun ed +ĠGriff in +Ġinf amous +Ġcelebr ity +ss on +Ġ ---------------------------------------------------------------- +ĠIs is +ĠDis play +Ġcred ibility +Ġeconom ies +Ġhead line +ĠCow boys +Ġind ef +Ġl ately +Ġincent ives +but ton +ĠM ob +A ut +Ġres igned +ĠO m +c amp +Ġprof iles +Ġsche mes +olph ins +ay ed +Cl inton +en h +ĠY ahoo +Ġab st +Ġan k +su its +Ġw ished +ĠMar co +udd en +Ġsp here +ĠB ishop +Ġincorpor ated +ĠPl ant +11 4 +Ġh ated +p ic +Ġdon ate +Ġl ined +Ġbe ans +Ġsteal ing +Ġcost ume +Ġsher iff +Ġfor ty +Ġint act +Ġadapt ed +Ġtrave lling +b art +Ġnice ly +Ġdri ed +Ġsc al +os ity +NOT E +ĠB h +ĠBron cos +ĠI gn +Ġint imate +Ġchem istry +Ġopt imal +D eb +ĠGener ation +Ġ] , +ich i +ĠW ii +ĠYOU R +vent ions +W rite +Ġpop ul +un ning +ĠW or +V ol +Ġqu een +head s +K K +Ġanaly ze +op ic +ear chers +Ġd ot +leg raph +ast ically +Ġupgr ades +Ġca res +Ġext ending +Ġfree ze +Ġin ability +Ġorg ans +Ġpret end +Ġout let +11 3 +ol an +ĠM all +ul ing +t alk +Ġexpress ing +ĠAl ways +ĠBe gin +f iles +Ġlic enses +% % +ĠM itt +Ġfil ters +ĠMil waukee +G N +Ġunf old +M o +Ġnut rition +pp o +B o +Ġfound ing +Ġunder mine +Ġeas iest +ĠC zech +ĠM ack +Ġsexual ity +ĠN ixon +W in +ĠAr n +ĠK in +ãĤ £ +ic er +Ġfort un +Ġsurf aces +agh d +Ġcar riers +ĠP ART +ĠT ib +Ġinter val +Ġfrust rating +ĠSh ip +ĠAr med +ff e +Ġbo ats +ĠAb raham +in is +Ġsu ited +th read +i ov +ab ul +ĠVenezuel a +Ġto m +su per +Ġcast le +alth ough +iox ide +ec hes +Ġevolution ary +Ġnegoti ate +Ġconfront ed +Rem ember +Ġ17 0 +S uch +Ġ9 11 +m ult +ĠA byss +ur ry +ke es +spe c +ĠBarb ara +Ġbelong ing +Ġvill ain +ist ani +Ġaccount able 
+Ġport ions +ĠDe cl +U r +ĠK ate +g re +Ġmag azines +UC K +Ġregul ate +om on +ĠAl most +Ġover view +Ġsc ram +Ġl oot +ĠF itz +Ġcharacter istic +ĠSn ake +s ay +ĠR ico +Ġtra it +ĠJo ined +au cus +Ġadapt ation +ĠAirl ines +Ġarch ae +ĠI de +Ġb ikes +Ġliter ary +Ġinflu ences +ĠUs ed +C reat +Ġple a +ĠDef ence +ĠAss ass +Ġp ond +UL T +) " +Ġeval uated +Ġob taining +Ġdem ographic +Ġvig il +ale y +Ġsp ouse +ĠSeah awks +resp ons +ĠB elt +um atic +Ġr ises +run ner +ĠMichel le +Ġpot ent +r ace +ĠP AC +F ind +olester ol +IS S +ĠIntrodu ced +ress es +ign ment +O s +ĠT u +ĠDe x +ic ides +Ġspark ed +ĠLaur a +ĠBry ant +Ġsm iling +ĠNex us +Ġdefend ants +ĠCat al +Ġdis hes +sh aped +Ġpro long +m t +( $ +ãĢ Ĥ +Ġcalcul ations +ĠS ame +Ġp iv +H H +Ġcance lled +Ġgr in +Ġterrit ories +ist ically +C ome +ĠP arent +Pro ject +Ġneg lig +ĠPriv acy +Ġam mo +LE CT +olute ly +ĠEp ic +Ġmis under +w al +Apr il +m os +path y +ĠC arson +Ġalbum s +ĠE asy +Ġpist ol +< < +Ġ\ ( +t arget +hel p +Ġinter pre +cons cious +ĠH ousing +ĠJ oint +12 7 +Ġbe ers +s cience +ĠFire fox +effect ive +ĠC abin +ĠO kay +ĠApp lic +Ġspace craft +ĠS R +ve t +ĠStr ange +S B +Ġcor ps +iber al +e fficient +Ġpreval ence +Ġeconom ists +11 8 +Th read +ord able +OD E +ĠC ant +=- =- +if iable +ĠA round +Ġpo le +Ġwilling ness +CL A +ĠK id +Ġcomple ment +Ġsc attered +Ġin mates +Ġble eding +e very +Ġque ue +ĠTr ain +Ġh ij +Ġme lee +ple ted +Ġdig it +Ġg em +offic ial +Ġlif ting +Ð µ +Re qu +it utes +Ġpack aging +ĠWork ers +h ran +ĠLeban on +ol esc +Ġpun ished +ĠJ uan +Ġj am +ĠD ocument +Ġm apping +ic ates +Ġinev itably +Ġvan illa +ĠT on +Ġwat ches +Ġle agues +Ġiniti ated +deg ree +port ion +Ġrec alls +Ġru in +Ġm elt +I AN +Ġhe m +Ex p +Ġb aking +ĠCol omb +at ible +Ġrad ius +pl ug +ĠI F +et ically +Ġf ict +H ER +ĠT ap +atin um +Ġin k +Ġco h +ĠW izard +b oth +te x +Ġsp ends +ĠCurrent ly +ĠP it +Ġneur ons +ig nt +Ġr all +Ġbus es +b uilding +Ġadjust ments +Ġc ried +ibl ical +att ed +ĠZ ion +ĠM atter +Ġmed itation +ĠD ennis +Ġour s +ĠT ab +Ġrank ings +ort al +Ġad vers +Ġsur render +ĠG ob +ci um +om as +im eter +Ġmulti player +Ġhero in +Ġoptim istic +Ġindic ator +ĠBr ig +Ġgro cery +Ġapplic ant +ĠRock et +v id +Ex ception +p ent +Ġorgan izing +Ġenc ounters +ĠT OD +Ġjew el +S ave +ĠChrist ie +Ġhe ating +Ġl azy +ĠC P +Ġcous in +Con fig +Ġreg ener +Ġne arest +Ġachie ving +EN S +th row +ĠRich mond +ant le +200 2 +Ġan ten +b ird +13 3 +Ġn arc +r aint +un ny +ĠHispan ic +ourn aments +Ġprop he +ĠTh ailand +ĠT i +Ġinject ion +Ġinher it +rav is +Ġmed i +Ġwho ever +ĠDE BUG +G P +ĠH ud +C ard +p rom +Ġp or +Ġover head +L aw +Ġviol ate +Ġhe ated +Ġdescript ions +Ġachieve ments +ĠBe er +ĠQu ant +W as +Ġe ighth +ĠI v +Ġspecial ized +U PDATE +ĠD elta +P op +J ul +ĠAs k +oph y +Ġnews letters +ĠT ool +Ġg ard +ĠConf eder +ĠGM T +ĠAb bott +Ġimm unity +ĠV M +Is lam +Ġimpl icit +w d +Ġ19 44 +rav ity +omet ric +Ġsurv iving +ur ai +ĠPr ison +Ġr ust +ĠSk etch +Ġbe es +ĠThe ory +Ġmer it +T ex +ch at +Ġm im +Ġpast e +ĠK och +Ġignor ance +ĠSh oot +Ġbas ement +Un ited +ĠAd vis +he ight +Ġf oster +Ġdet ain +in formation +Ġne ural +' ; +Ġprov es +all ery +Ġinv itation +um bers +Ġc attle +Ġbicy cle +z i +Ġconsult ant +Ġap ology +ĠT iger +Ġ12 3 +99 9 +Ġind ividually +r t +ig ion +ĠBrazil ian +Ġdist urb +Ġentreprene urs +Ġfore sts +cer pt +pl ates +p her +clip se +Ġtw itter +Ġac ids +ograph ical +h um +ĠB ald +if ully +Ġcomp iler +ĠD A +Ġdon or +as i +Ġtrib al +l ash +ĠCon fig +Ġapplic ants +Ġsal aries +13 5 +Put in +ĠF ocus +ir s +Ġmisc onduct +ĠH az +Ġeat en +M obile +Mus lim +ĠMar cus +v iol +Ġfavor 
able +Ġst ub +ad in +ĠH ob +Ġfaith ful +Ġelectron ics +Ġvac uum +w ait +back ed +econom ic +d ist +Ġten ure +Ġsince re +ĠT ogether +ĠW ave +Ġprog ression +Ġden ying +Ġdist ress +br aska +th ird +Ġmix ing +Ġcolon ial +Ġpriv ately +Ġun rest +atern ity +Ġprem ises +ant i +greg ation +Ġlic ence +ĠH ind +ĠSam uel +Ġconvinc ing +ĠA ce +ĠR ust +ĠNet anyahu +Ġhand les +ĠP atch +orient ed +ah o +ĠG onz +Ġhack ers +claim er +Ġcustom s +ĠGr an +f ighters +Ġl uc +Ġman uscript +aren thood +Ġdev il +Ġwar riors +Ġoff enders +Will iam +Ġhol idays +Ġnight mare +Ġle ver +iff erent +St at +Ġexhib ition +put ed +ĠP ure +Ġal pha +Ġenthus iasm +ĠRepresent atives +E AR +ĠT yp +Ġwhe at +ĠAl f +Ġcor rection +Ġev angel +AT T +M iss +Ġs oup +Ġimpl ied +par am +Ġsex y +ĠL ux +Ġrep ublic +p atch +ab lish +Ġic ons +Ġfather s +ĠG ET +ĠCar ib +Ġregul ated +ĠCo hen +ĠBob by +Ġn er +Ġb ent +vent ory +ĠAl ong +ĠE ST +ĠWall ace +Ġmurd ers +r ise +ke ll +ĠCommon wealth +Ġn asty +et a +ĠM IT +Ġadminist ered +Ġgenuine ly +Ed itor +n ick +Ġhyd ro +**************** **************** +ĠB le +Ġfin es +Ġg orge +aus ible +r h +Ġapp le +ment ioned +Ġro pe +ot yp +H R +Ġdisappoint ing +Ġc age +n ik +Ġdoub ts +ĠF REE +print s +ĠM UST +Ġvend ors +ĠIn qu +Ġliber als +Ġcontract or +Ġup side +child ren +Ġtrick y +Ġregul ators +charg ed +l iter +Ġ *** +Ġreb ell +l ang +Ġloc als +Ġphys icians +Ġhe y +ar se +t m +ĠLe x +Ġbehavior al +success ful +F X +Ġbr ick +ov ic +Ġcon form +Ġreview ing +Ġins ights +Ġbi ology +ĠRem ove +ĠExt ra +Ġcomm itting +indu ced +ignt y +ig m +Ġat omic +Comm on +ĠE M +ĠP ere +ĠIt ems +e h +Ġpres erved +ĠH ood +Ġprison er +Ġbankrupt cy +Ġg ren +us hes +Ġexplo itation +Ġsign atures +Ġfin an +] ," +ĠM R +Ġme g +rem lin +Ġmusic ians +Ġselect ing +Ġexam ining +IN K +l ated +H i +Ġart ic +Ġp ets +Ġimp air +ĠM AN +Ġtable ts +in clude +R ange +Ġca ut +Ġlog s +Ġmount ing +Ġun aware +Ġdynam ics +ĠPalest ine +ĠQu arter +ĠPur ple +Ġm a +ĠIm port +Ġcollect ions +ci ation +Ġsuccess or +Ġcl one +Ġaim ing +Ġposs essed +Ġstick ing +Ġsh aking +Ġloc ate +ĠH ockey +T urn +17 0 +Ġfif teen +ĠHar rison +Ġcontinu ously +ĠT C +ĠVal ent +ĠRes cue +Ġby pass +am ount +Ġm ast +Ġprotect s +Ġart istic +Ġsomet ime +Ġsh oe +Ġshout ed +ific ant +et itive +ĠReg ister +ĠJ in +Ġconcent rated +ling ton +on ies +Ġgener ator +yr im +ĠAr men +Ġclear ing +id o +ĠT W +al ph +Ġlad ies +H ard +Ġdial og +Ġinput s +æ ľ +Ġpos es +Ġsl ots +ĠPrem ium +Ġle aks +Ġboss es +Ġ11 3 +c ourse +A cc +ĠNew ton +ĠAust ria +ĠM age +Ġte aches +ab ad +Ġwe ars +Ġc yl +Ġcur se +ĠS ales +ĠW ings +Ġp sy +Ġg aps +ĠIce land +ĠP interest +Ġland lord +Ġdefin itions +ĠK er +Ġsufficient ly +ĠP ence +ĠArch itect +Ġsur pass +Ġ11 4 +Ġsuper hero +ĠDise ase +Ġpri ests +ĠC ulture +Ġdefin itive +Ġsecret ly +ĠD ance +inst all +ch ief +ĠJess ica +W ould +Up dated +Ġlock er +ĠK ay +Ġmem orial +è ¦ +f at +Ġdis gu +Ġflav ors +ĠBase ball +ĠRes istance +Ġk icks +Ġen v +Ġteen agers +D ark +ĠC AR +Ġh alt +ĠL G +ĠGab riel +Ġfe ver +Ġs atur +Ġm all +Ġaffili ate +ĠS leep +ĠSpe cific +ĠV el +Ġj ar +ĠSac red +ĠEd wards +ĠA CL +Ġret ained +ĠG iant +Ġlim itation +in ces +Ġref usal +ĠT ale +ĠBut ler +Ġacc idents +ĠC SS +Ġimport ed +ĠCop y +Î ± +ER T +z el +Ġdiv isions +h ots +ĠAl b +ĠD S +Load er +W ashington +at isf +ĠCreat ive +\ . 
+ĠAut om +red ict +Ġrecept or +ĠCarl os +Met hod +ok a +Ġmal icious +Ġste pping +, [ +ĠD ad +Ġatt raction +ĠEffect s +ĠPir ate +ĠC er +ĠIndust ry +ĠR ud +Ġchar ter +Ġd ining +Ġins ists +Ġconfig ure +Ġ( # +ĠSim ple +ĠSc roll +UT C +17 5 +ĠK on +Ġmarket place +Ġ ãĤ +Ġref res +Ġg ates +er red +ĠP od +Ġbeh ave +Fr ank +n ode +Ġendors ed +he tt +as ive +ĠHom eland +Ġr ides +ĠLe ave +er ness +Ġflood ing +A FP +Ġris en +Ġcontin ually +Ġun anim +ĠCont ract +ĠP as +Ġgu ided +ĠCh ile +b d +Ġsu cc +pt ic +Ġcomm ittees +ĠL uther +ĠAny one +Ġs ab +12 4 +Ġp ixel +ĠB ak +ĠT ag +ĠBenn ett +En ter +sm all +ĠPresident ial +Ġp ul +Ġcontr ace +arch ive +Ġcoast al +ĠK ids +19 2 +âĢ ² +ick y +ING TON +Ġw olf +ĠSt alin +T ur +id get +am as +ĠUn less +Ġspons or +Ġmor ph +ĠCho ose +Ġrun ner +Ġun bel +Ġm ud +ĠMan a +Ġdub bed +Ġg odd +ure rs +wind ow +Ġrel ied +Ġcelebr ating +os c +Ġ13 5 +Ġlobb ying +Ġincom plete +Ġrestrict ion +Ġinc ap +it us +Ġexpect ation +ĠAp ollo +Ġint ens +Ġsyn c +G H +Ġmanip ulation +B Y +Ġspe ar +Ġbre asts +Ġvol can +il ia +M aterial +Ġform ats +ĠB ast +Ġparliament ary +Ġsn ake +Ġserv ants +ĠTr udeau +ĠGr im +ĠArab ic +ĠSC P +ĠBoy s +st ation +Ġprospect ive +ord e +in itialized +Ġb ored +AB LE +Ġaccess ed +Ġtax i +ĠShe ll +aid en +urs ed +in ates +ĠIns urance +ĠPet e +Sept ember +6 50 +Ġad ventures +ĠCo ver +Ġt ribute +Ġsk etch +Ġem power +Ġ Ø +ĠGl enn +ĠD aw += \" +ĠPolit ics +Ġgu ides +Ġd ioxide +ĠG ore +ĠBr ight +ĠS ierra +Ġval ued +c ond +Ġpo inter +Se lect +Ġrisk y +Ġabsor b +im ages +Ġref uses +Ġbon uses +__ _ +Ġh ilar +ĠF eatures +2 20 +ĠCollect or +F oot +Ġ19 64 +cul us +Ġd awn +Ġwork out +ĠL O +Ġphilosoph ical +ĠSand y +ĠYou th +Ġl iable +A f +bl ue +Ġovert urn +less ness +ĠTrib une +ĠIn g +Ġfact ories +Ġcat ches +Ġpr one +Ġmat rix +Ġlog in +Ġin acc +Ġex ert +s ys +Ġneed le +ĠQ ur +Ġnot ified +ould er +t x +Ġremind s +Ġpublisher s +Ġn ort +Ġg it +Ġfl ies +ĠEm ily +Ġflow ing +ĠAl ien +ĠStr ateg +Ġhard est +Ġmod ification +AP I +ĠM Y +Ġcr ashes +st airs +n umber +Ġur ging +ch annel +ĠFal con +Ġinhabit ants +Ġterr ifying +Ġutil ize +Ġban ner +Ġcig arettes +Ġsens es +ĠHol mes +Ġpract ition +ĠPhill ips +ott o +Ġcomp ile +Mod el +ĠK o +Ġ[ ] +Americ ans +ĠTer ms +Ġmed ications +ĠAn a +Ġfundament ally +ĠNot ice +Ġwe aker +Ġ 0000 +Ġgar lic +Ġout break +Ġeconom ist +ĠB irth +Ġobst acles +ar cer +ĠOr thodox +Ġplace bo +ĠC rew +asp berry +ĠAng els +Ġdis charge +Ġdestruct ive +11 7 +ĠR ising +Ġd airy +l ate +Ġcoll ision +ĠTig ers +ean or +ocument ed +ĠIn valid +Ġd ont +ĠL iter +ĠV a +Ġhyd rogen +Ġvari ants +ĠBrown s +Ġ19 65 +Ġind igenous +Ġtrad es +Ġremain der +Ġswe pt +ĠImp act +Ġred ist +Ġun int +grad uate +ãĥ ķ +ĠW ILL +ãģ® ç +ĠCrit ical +Ġf isher +Ġv icious +Ġrevers ed +Y ear +ĠS ox +Ġshoot ings +Ġfil ming +Ġtouchdown s +ai res +m el +Ġgrand father +Ġaffect ion +ing le +Ġover ly +Add itional +Ġsup reme +ĠGr ad +Ġsport ing +Ġmer cy +ĠBrook s +ount y +Ġperform s +Ġtight ly +Ġdem ons +Ġkill ings +Ġfact ion +ĠNov a +aut s +Ġund oubtedly +ar in +Ġunder way +ra k +Ġl iv +ĠReg ion +Ġbrief ing +s ers +cl oud +ĠM ik +us p +Ġpred iction +az or +Ġport able +ĠG and +Ġpresent ing +Ġ10 80 + » +ush i +ĠSp ark +there um +Ġjust ification +ĠN y +Ġcontract ors +ming ham +ĠSt yle +å ħ +ĠChron icles +ĠPict ure +Ġprov ing +Ġw ives +set t +Ġmole cules +ĠFair y +Ġconsist ing +Ġp ier +al one +in ition +Ġn ucle +j son +Ġg otta +Ġmob il +Ġver bal +ar ium +Ġmon ument +uck ed +Ġ25 6 +T ech +mine craft +ĠTr ack +Ġt ile +Ġcompat ibility +as is +Ġs add +Ġinstruct ed +ĠM ueller +Ġle thal +Ġhorm one +Ġor che +el se +Ġske let 
+Ġentert aining +Ġminim ize +ag ain +Ġunder go +Ġconst raints +Ġcig arette +ĠIslam ist +Ġtravel s +ĠPant hers +l ings +C are +Ġlaw suits +ur as +Ġcry st +Ġlow ered +Ġaer ial +Ġcomb inations +Ġha un +Ġch a +Ġv ine +Ġquant ities +Ġlink ing +b ank +Ġso y +B ill +ĠAngel a +Ġrecip ient +ĠProt est +Ġs ocket +Ġsolid arity +Ġâ Ĩ +m ill +Ġvar ies +ĠPak istani +Dr agon +Ġun e +Ġhor izon +³³³³ ³³³³ +Ġprov inces +Ġfrank ly +Ġenact ed +not es +[ ' +Ġ19 2 +ocr acy +Ġendorse ment +Ġover time +Tr ue +L ab +lic ted +ĠD NC +Ġbe ats +ĠJam ie +15 2 +ĠIN T +Cont act +Ġaccount ed +h ash +ĠPack ers +p ires +Ġles bian +Ġamend ments +Ġhop eful +ĠFin land +Ġspot light +Ġconfig ured +Ġtrou bled +Ġg aze +ĠCal gary +Ġrel iability +Ġins urg +sw er +b uy +ĠSk in +Ġp ixels +Ġhand gun +Ġpar as +Ġcateg or +ĠE L +ĠRe x +Ind eed +Ġkind a +Ġconj unction +ĠBry an +ĠMan ufact +y ang +Pl us +S QL +ish ment +Ġdom inate +Ġn ail +Ġo ath +Ġeru pt +ĠF ine +it bart +ĠCh ip +ĠAb d +ĠN am +Ġbuy er +Ġdiss ent +Le aks +Cont in +Ġr ider +ĠSome one +Ġill usion +c in +ĠBoe ing +Ġin adequ +ov ation +i ants +Ġreb uild +4 50 +ĠDest iny +S W +ĠT ill +H it +ia z +ĠBang l +acher s +ĠRe form +Ġse gments +Ġsystem atic +d c +ĠConserv atives +Ġport al +h or +ĠDragon bound +Ġdrag ged +om o +Ġthe e +ad vert +ĠRep orts +ĠE t +Ġbarrel s +Aug ust +Ġcompar isons +Ġhe x +Ġan throp +" [ +bor ough +ab i +Ġpict ured +play ing +ĠAdd ress +ĠMir ror +Sm ith +Ġt ires +ĠN PR +AA AA +Ġclass ification +ĠTh an +ĠH arm +ĠR A +Ġreject ion +min ation +Ġr anged +ĠF alls +D I +H ost +ãĤ ´ +ĠEx ample +list ed +th irds +Ġsaf egu +br and +Ġprob able +Can ada +IT ION +ĠQ aeda +Ġch ick +Ġimport s +h it +l oc +W W +Ġble w +Ġany time +Ġwh oles +ik ed +Ġcal culation +cre ate +ĠO ri +Ġupgr aded +Ġapp ar +ut ory +ĠM ol +B rit +ĠJ ong +IN AL +ĠStart ing +Ġd ice +urt le +Ġre lying +cl osure +Ġprof itable +Ġsl aughter +ĠMan ual +c aster +Ġ" $ +Ġfe ather +ĠSim ply +ie ves +Ġdeter ior +ĠPC I +Ġst amp +Ġfl aws +Ġsh ade +ham mer +Ġpass port +Ġcont ing +am el +Ġobser vers +Ġneg lect +ĠR B +ĠBrother hood +Ġskept ical +f amily +us k +Ġemotion ally +â Ļ +ĠBet a +ason able +id ity +ĠM ul +Ġkick ing +ĠC arm +oll ah +VERT IS +ĠAt hen +Ġlad der +ĠBul let +å £ +00 01 +ĠWild life +ĠM ask +ĠN an +R ev +Ġun acceptable +leg al +Ġcrowd ed +ag i +ĠC ox +j e +Ġmor ality +Ġfu els +Ġc ables +Ġman kind +ĠCarib bean +Ġanch or +Ġby te +ĠO ften +ĠO z +Ġcraft ed +Ġhistor ian +ĠW u +Ġtow ers +ĠCitiz ens +Ġhel m +Ġcred entials +Ġsing ular +ĠJes se +Ġtack les +Ġcont empt +Ġa fore +ĠSh adows +Ġn il +Ġur gent +app le +bl ood +Ġv on +Ġoff line +Ġbreat he +Ġj umps +Ġirre levant +ox ic +om al +import ant +J im +Ġgl oves +arm ing +dep th +Ġtal ents +ook ie +ĠS B +Ġpal m +uff s +est a +IG H +Ġcan on +ĠVer izon +ĠP le +Ġcou pled +vel t +Ġfundra ising +ĠGet ting +ĠD LC +Ġmathemat ical +ĠH S +ĠCard inals +te lling +Ġspons ors +Ġ Ï +ĠBull s +op tion +Ġprop ose +Ġmem orable +Ġembr aced +Ġdecl ining +He alth +ed a +Ġ} ; +Ġsp am +m ile +Ġpit cher +ĠE ight +Ġcar ing +ut ic +ro le +Ġair line +ernand ez +ĠAth let +Ġcert ification +ux e +rig er +Ġem pir +Ġsens ation +Ġdis m +Ġb olt +Ġev olve +H ouse +Ġconsult ation +ĠD uty +Ġtou ches +ĠN athan +Ġf aint +h ad +" ( +ĠCons umer +ĠExt reme +Ġ12 7 +ĠHer m +ĠSac rament +iz oph +Ġanx ious +ul ously +Ġsoc ially +ĠU TC +Ġsol ving +ĠLet ter +Hist ory +ed uc +Pr ice +) ); +Ġrel oad +am ic +Ġp ork +Ġdisc ourse +Ġt ournaments +ai ro +ĠK ur +ĠCost a +Ġviol ating +Ġinterf ere +Ġrecre ational +uff le +Ġspe eches +Ġneed ing +Ġremem bers +Ġcred ited +n ia +f ocused +amer a +Ġb ru +um bs +ĠCub an 
+Ġpreced ing +Ġnons ense +ac ial +Ġsmart phones +ĠSt ories +S ports +ĠEmer gency +oun cing +ef ined +Ġb er +Ġconsult ing +Ġm asters +he astern +." [ +ĠRun ning +Ġsus cept +ĠF eng +Americ a +pr ises +st itial +ĠWeek ly +ĠGreat er +mod ules +if ter +G raphics +ul er +Ġwho lly +Ġsupp ress +Ġconce aled +Ġhapp ily +Ġaccept s +ĠEn joy +Ġr ivers +ĠEx cept +2 25 +ĠN HS +ĠMc Connell +Ġp ussy +fer red +ut able +Ġatt ain +Ġ> = +Ġdepos its +roph ic +Ġnot orious +ĠSh aw +il itation +Ġepid emic +all ic +Ġsmall est +ov ich +Ġaccess ories +per ties +Ġsur plus +ĠMe ch +Ġamb ig +ĠImm igration +Ġch im +ev al +Ġpract icing +ĠMyster y +Ġdom ains +ĠSil icon +app s +Ġkilomet ers +e a +ĠSm ash +Ġwarrant y +Ġn ost +s il +re v +J on +ĠDub lin +Ġtast es +Ġb out +g reat +er ror +Ġsw itches +ĠB apt +D O +ok i +Ġsour ced +pro du +Ġattach ment +ĠIss ue +ĠQuest ion +Jo in +Ġf itted +Ġunlaw ful +^ ^ +ere k +Ġauthent ication +Ġst ole +Ġaccount ability +l abel +S earch +Ġal beit +atic an +fund ed +ĠAdd ing +ĠI Q +Ġsub mar +l it +a que +ĠLear ning +Ġint eger +M aster +ĠCh rom +Ġprem ier +O p +ĠLi u +Ġbl essed +ĠGl obe +ĠResp onse +Ġlegit im +ĠMer kel +Ġdispos al + ´ +Ġgau ge +pe at +Ġindu ced +Ġquestion able +arth y +ĠV it +ĠF eed +U ntil +U t +worth y +R Y +ĠH erald +ĠHam mer +Ġmed al +ĠR ivers +ĠH ack +Ġclar ify +Ġtrack ed +Ġautonom ous +Ġten ant +ĠQ atar +er ie +Ġgr im +ĠMon itor +Ġresist ant +ĠSpe c +ĠWell s +N AS +14 8 +Ġmin ers +iot ics +Ġmiss es +11 6 +g ian +g it +ĠE yes +p res +Ġgrad uated +Ġang el +Ġsyn chron +Ġefficient ly +Ġtrans mitted +H arry +Ġglob ally +EN CE +ĠMont ana +r aged +ĠPre vention +Ġp iss +ĠL l +Ġshe lf +ĠB JP +ĠTest ament +ĠL ate +ik er +ĠH app +ĠJul ian +h all +Ġsp ont +Ġshut down +Ġincons istent +Ġsubscrib ers +Ġske leton +ĠNe braska +Ġins pire +ĠV oid +F eed +Ġang les +ĠSpr ings +Ġbench mark +Ġvacc ines +izoph ren +se xual +uff ed +Ġsh ine +ĠK ath +Ġgest ure +ine a +Ġr ip +Ġopp ression +Ġcons cience +b t +ĠL um +Ġinc idence +ĠF a +w r +Ġmin eral +ĠSp urs +alk y +Ġth under +Ġop io +Be ing +ĠPal m +Ġwas ted +Ġl b +i aries +ĠIniti ative +Ġcur ric +Ġmark er +ĠMc L +Ġext ensions +ĠP v +ĠAr ms +Ġoffer ings +Ġdef enses +Ġvend or +Ġcontrad ict +ĠCol in +Ġredd it +Ġper ipher +12 2 +Ġs ins +E dit +IC T +So ft +ĠSh ah +Ġadministr ator +ĠT rip +Ġporn ography +Ġtu ition +in ence +ĠPro gress +Ġcat alog +Ġsu ite +Ġh ike +Ġreprodu ctive +eng ine +Ġd rought +ĠNo ah +Ġ2 30 +Ġd ude +Ġrelax ed +Ġpart ition +Ġparticip ant +Ġtel esc +Ġfe as +ĠF F +own er +Ġswe eping +Ġl enses +Ġmatch up +ĠRe pl +ourn als +Ġcred ible +Ġgrand mother +Ġther mal +Ġsubscrib ing +Ġident ities +col m +U CT +Ġreluct ant +us ers +ĠC ort +Ġassist ed +OS S +ATION S +IS H +Ġpharm aceutical +ic able +ad ian +ĠSon ic +ĠF ury +ĠM ong +A H +ĠPsych ology +Ġph osph +Ġtreat s +Ń Ķ +Ġstead ily +ĠHell o +Ġrel ates +Ġcl ue +Ex pl +a uth +Ġrev ision +Ġe ld +os ion +Ġbr on +14 4 +ri kes +Ġmin es +Ġblank et +ĠF ail +el ed +ĠIm agine +ĠPl anned +a ic +Re quest +M ad +ĠHor se +ĠEag le +Ġcap ac +15 7 +Ġl ing +ĠN ice +ĠP arenthood +min ster +og s +ens itive +Not hing +Ġcar n +F in +ĠP E +Ġr ifles +ĠL P +S and +Ġgui Active +Ġtour ist +C NN +Ġunve iled +Ġpredec essor +} { +u ber +Ġoff shore +Ġopt ical +ĠR ot +ĠPear l +et on +Ġst ared +Ġfart her +at ility +cont in +ĠG y +ĠF oster +ĠC oc +ri ents +Ġdesign ing +ĠEconom y +ON G +W omen +ĠN ancy +er ver +Ġmas cul +Ġcasual ties +Ġ2 25 +ĠS ullivan +ĠCh oice +Ġa ster +w s +Ġhot els +Ġconsider ations +Ġcou ch +ĠSt rip +ĠG n +Ġmanip ulate +l ied +Ġsynt hetic +Ġassault ed +Ġoff enses +ĠDra ke +Ġim pe +Oct ober +ĠHer itage 
+h l +ĠBl air +Un like +Ġg rief +Ġ4 50 +Ġopt ed +Ġresign ation +il o +Ġver se +ĠT omb +Ġu pt +Ġa ired +ĠH ook +ĠML B +Ġassum es +out ed +ĠV ers +Ġinfer ior +Ġbund le +ĠD NS +ograp her +Ġmult ip +ĠSoul s +Ġillust rated +Ġtact ic +Ġdress ing +Ġdu o +Con f +Ġrel ent +Ġc ant +Ġscar ce +Ġcand y +ĠC F +Ġaffili ated +Ġspr int +yl an +ĠGarc ia +Ġj unk +Pr int +ex ec +C rit +Ġport rait +ir ies +ĠOF F +Ġdisp utes +W R +L ove +ãģ Ħ +ĠRe yn +Ġh ipp +op ath +Ġflo ors +ĠFe el +Ġwor ries +Ġsett lements +ĠP os +Ġmos que +Ġfin als +Ġcr ushed +ĠPro bably +ĠB ot +ĠM ans +ĠPer iod +Ġsovere ignty +Ġsell er +Ġap ost +Ġam ateur +Ġd orm +Ġconsum ing +Ġarm our +ĠRo ose +Ġint ensive +Ġelim inating +ĠSun ni +ĠAle ppo +j in +Ġadv ise +p al +ĠH alo +Ġdes cent +Ġsimpl er +Ġbo oth +ST R +L ater +ĠC ave +== = +Ġm ol +Ġf ist +Ġshot gun +su pp +Ġrob bery +E ffect +Ġobsc ure +ĠProf essional +Ġemb assy +Ġmilit ant +Ġinc arcer +Ġgener ates +Ġlaun ches +Ġadministr ators +Ġsh aft +Ġcirc ular +Ġfresh man +ĠW es +ĠJo el +ĠD rew +ĠDun can +ĠApp arently +s ight +ĠIntern al +ĠInd ividual +ĠF E +Ġb ore +ĠM t +Ġbroad ly +ĠO ptions +ount ain +ip es +ĠV ideos +20 4 +Ġh ills +Ġsim ulation +Ġdisappoint ment +it an +ĠLabor atory +Ġup ward +Ġbound ary +Ġdark er +h art +Ġdomin ance +C ong +ĠOr acle +ĠL ords +Ġscholars hip +ĠVin cent +ed e +ĠR ah +Ġencour ages +ro v +Ġqu o +Ġprem ise +ĠCris is +ĠHol ocaust +Ġrhyth m +Ġmet ric +cl ub +Ġtransport ed +Ġn od +ĠP ist +Ġancest ors +ĠFred er +th umbnails +ĠC E +ON D +Ph il +ven ge +ĠProduct s +cast le +Ġqual ifying +ĠK aren +VERTIS EMENT +Ġmight y +Ġexplan ations +Ġfix ing +D i +Ġdecl aring +Ġanonym ity +Ġju ven +ĠN ord +ĠDo om +ĠAct ually +O k +ph is +ĠDes ert +Ġ11 6 +I K +ĠF M +Ġinc omes +V EL +ok ers +Ġpe cul +Ġlight weight +g ue +Ġacc ent +Ġincre ment +ĠCh an +Ġcompl aining +ĠB aghd +Ġmidfield er +Ġover haul +Pro cess +ĠH ollow +ĠTit ans +Sm all +man uel +ĠUn ity +ĠEv ents +S ty +Ġdispro portion +n esty +en es +ĠC od +Ġdemonstr ations +ĠCrim son +ĠO H +Ġen rolled +Ġc el +ĠBre tt +Ġa ide +Ġhe els +Ġbroad band +Ġmark ing +Ġw izard +ĠN J +ĠChief s +Ġingred ient +Ġd ug +ĠSh ut +urch ase +end or +Ġfar mer +ĠGold man +12 9 +15 5 +Or der +Ġl ion +i ably +Ġst ain +ar ray +ilit ary +ĠFA Q +Ġexpl oded +ĠMcC arthy +ĠT weet +ĠG reens +ek ing +l n +ens en +Ġmotor cycle +Ġpartic le +Ġch olesterol +B ron +Ġst air +Ġox id +Ġdes irable +ib les +Ġthe or +for cing +Ġpromot ional +ov o +b oot +ĠBon us +raw ling +Ġshort age +ĠP sy +Ġrecru ited +Ġinf ants +Ġtest osterone +Ġded uct +Ġdistinct ive +Ġfirm ware +bu ilt +14 5 +Ġexpl ored +Ġfact ions +Ġv ide +Ġtatt oo +Ġfinan cially +Ġfat igue +Ġproceed ing +const itutional +Ġmis er +Ġch airs +gg ing +ipp le +Ġd ent +Ġdis reg +ç Ķ +st ant +ll o +b ps +aken ing +Ġab normal +ĠE RA +å£ « +ĠH BO +ĠM AR +Ġcon cess +Ġserv ant +Ġas pir +l av +ĠPan el +am o +Ġprec ip +Ġrecord ings +Ġproceed ed +Ġcol ony +ĠT ang +ab lo +Ġstri pped +Le ft +to o +Ġpot atoes +Ġfin est +% ). 
+Ġc rap +ĠZ ach +ab ases +ĠG oth +Ġbillion aire +w olf +Ġsan ction +S K +Ġlog ged +P o +ey ed +un al +Ġcr icket +Ġarm ies +Ġunc overed +Cl oud +ó n +Ġreb ounds +Ġm es +O per +P ac +Ġnation ally +Ġinsert ed +p ict +Ġgovern ance +Ð ¸ +Ġprivile ges +G ET +Ġfavor ites +im ity +Ġlo ver +the m +em pl +Ġgorge ous +An n +Ġsl ipped +Ġve to +B ob +Ġsl im +u cc +ĠF ame +udden ly +Ġden ies +ĠM aur +Ġdist ances +Ġw anna +t ar +ĠS ER +Ġâ Ī +Ġle mon +at hetic +Ġlit eral +Ġdistingu ished +Ġansw ering +G I +Ġrelig ions +ĠPhil os +ĠL ay +Ġcomp os +ire ments +ĠK os +ine z +roll ing +Ġyoung est +and ise +ĠB orn +Ġalt ar +am ina +ĠB oot +v oc +Ġdig ging +Ġpress ures +Ġl en +26 4 +Ġassass ination +ĠBir mingham +ĠMy th +Ġsovere ign +ĠArt ist +ĠPhot ograph +Ġdep icted +Ġdisp ens +orth y +Ġamb ul +int eg +ĠC ele +ĠTib et +Ġhier archy +Ġc u +Ġpre season +ĠPet erson +Ġcol ours +Ġworry ing +Ġback ers +ĠPal mer +ĠÎ ¼ +Ġcontribut or +Ġhear ings +Ġur ine +Ġ Ù +ourge ois +Sim ilar +ĠZ immer +s omething +ĠUS C +Ġstrength s +ĠF I +Ġlog ging +As ked +ĠTh ai +in qu +ĠW alt +Ġcrew s +it ism +3 01 +Ġshar ply +um ed +Ġred irect +r ators +In f +ĠWe apons +Ġte asp +19 99 +L ive +ĠEs pecially +ĠS ter +ĠVeter ans +Ġint ro +other apy +Ġmal ware +Ġbre eding +Ġmole cular +ĠR oute +ĠCom ment +oc hem +Ġa in +Se ason +Ġlineback er +Ä « +ĠEconom ics +es ar +ĠL ives +ĠEm ma +Ġk in +ĠTer rit +Ġpl anted +ot on +ĠBut ter +ĠSp ons +P ER +Ġdun geon +Ġsymb olic +Ġfil med +Ġdi ets +Ġconclud es +Ġcertain ty +ĠForm at +Ġstr angers +form at +ĠPh ase +Ġcop ied +Ġmet res +ld a +ĠUs ers +Ġdeliber ate +Ġwas hed +ĠL ance +im ation +Ġimpro per +ĠGen esis +ick r +ĠK ush +Ġreal ise +Ġembarrass ing +alk ing +b ucks +Ġver ified +Ġout line +year s +ĠIn come +20 2 +Ġz ombies +F inal +ĠMill enn +Ġmod ifications +ĠV ision +ĠM oses +ver b +iter ranean +ĠJ et +Ġnav al +ĠA gg +Ġur l +Ġvict ories +Ġnon etheless +Ġinj ust +ĠF act +ç ļ +Ġins ufficient +re view +face book +Ġnegoti ating +Ġguarant ees +im en +uten berg +Ġg ambling +Ġcon gr +Load ing +Ġnever theless +Ġpres idents +ĠIndust rial +Ġ11 8 +Ġp oured +ĠT ory +Ġ17 5 +Ġ: = +Sc ott +ange red +T ok +Ġorgan izers +M at +ĠG rowth +Ġad ul +Ġens ures +Ġ11 7 +é¾į å +Ġmass acre +Ġgr ades +be fore +AD VERTISEMENT +ĠSl ow +ĠM MA +âĢĶ " +ĠV atican +Q aeda +Ġo we +66 66 +ĠS orry +ĠGr ass +Ġbackground s +Ġexha usted +Ġcl an +Ġcomprom ised +ĠE lf +ĠIsa ac +ens on +In vest +IF A +Ġinterrupt ed +ãĥī ãĥ© +Ġtw isted +ĠDrag ons +M ode +ĠK remlin +Ġfert il +he res +ph an +ĠN ode +f ed +ĠOr c +Ġunw illing +C ent +Ġprior it +Ġgrad uates +Ġsubject ive +Ġiss uing +ĠL t +Ġview er +Ġw oke +Th us +bro ok +Ġdep ressed +Ġbr acket +ĠG or +ĠFight ing +Ġstri ker +Rep ort +ĠPortug al +Ġne o +w ed +19 9 +Ġflee ing +sh adow +ident ified +US E +Ste am +Ġstret ched +Ġrevel ations +art ed +ĠD w +Ġalign ment +est on +ĠJ ared +S ep +Ġblog s +up date +g om +r isk +Ġcl ash +ĠH our +Ġrun time +Ġunw anted +Ġsc am +Ġr ack +Ġen light +on est +ĠF err +Ġconv ictions +Ġp iano +Ġcirc ulation +ĠW elcome +Ġback lash +ĠW ade +Ġrece ivers +ot ive +J eff +Ġnetwork ing +ĠPre p +ĠExpl orer +Ġlect ure +Ġupload ed +ĠMe at +B LE +ĠNaz is +ĠSy nd +st ud +ro ots +ri ans +Ġportray ed +Ġ ?? 
+ĠBudd ha +s un +Rober t +ĠCom plex +Ġover see +Ġste alth +T itle +ĠJ obs +ĠK um +Ġappreci ation +ĠM OD +Ġbas ics +Ġcl ips +Ġnurs ing +Ġpropos ition +Ġreal ised +ĠNY C +Ġall ocated +ri um +ar an +ĠPro duction +ĠV ote +Ġsm ugg +Ġhun ter +az er +ĠCh anges +Ġfl uct +y on +Ar ray +Ġk its +W ater +Ġuncom mon +Ġrest ing +ell s +w ould +Ġpurs ued +Ġassert ion +omet own +ĠMos ul +ĠPl atform +io let +Ġshare holders +Ġtra ils +P ay +ĠEn forcement +ty pes +ĠAn onymous +Ġsatisf ying +il ogy +Ġ( ' +w ave +c ity +Ste ve +Ġconfront ation +ĠE ld +C apt +ah an +ht m +ĠC trl +ON S +2 30 +if a +hold ing +Ġdelic ate +Ġj aw +ĠGo ing +or um +S al +Ġd ull +ĠB eth +Ġpr isons +Ġe go +ĠEl sa +avor ite +ĠG ang +ĠN uclear +Ġsp ider +ats u +Ġsam pling +Ġabsor bed +ĠPh arm +iet h +Ġbuck et +ĠRec omm +O F +ĠF actory +AN CE +Ġb acter +H as +ĠObs erv +12 1 +Ġprem iere +De velop +Ġcur rencies +C ast +Ġaccompany ing +ĠNash ville +Ġfat ty +ĠBre nd +Ġloc ks +Ġcent ered +ĠU T +augh s +or ie +ĠAff ordable +v ance +D L +em et +Ġthr one +ĠBlu etooth +Ġn aming +if ts +AD E +Ġcorrect ed +Ġprompt ly +ĠST R +Ġgen ome +Ġcop e +Ġval ley +Ġround ed +ĠK end +al ion +p ers +Ġtour ism +Ġst ark +v l +Ġblow ing +ĠSche dule +st d +Ġunh appy +Ġlit igation +ced es +Ġand roid +Ġinteg ral +ere rs +ud ed +t ax +Ġre iter +ĠMot ors +oci ated +Ġwond ers +ĠAp ost +uck ing +ĠRoose velt +f ram +Ġyield s +Ġconstit utes +aw k +Int erest +Ġinter im +Ġbreak through +ĠC her +Ġpro sec +ĠD j +ĠM T +Res p +ĠP T +Ġs perm +ed it +B T +Lin ux +count ry +le ague +Ġd ick +Ġo ct +Ġinsert ing +Ġsc ra +ĠBrew ing +Ġ19 66 +Ġrun ners +Ġpl un +id y +ĠD ian +Ġdys function +Ġex clusion +Ġdis gr +Ġincorpor ate +Ġrecon c +Ġnom inated +ĠAr cher +d raw +achel or +Ġwrit ings +Ġshall ow +Ġh ast +ĠB MW +ĠR S +Ġth igh +Ġ19 63 +Ġl amb +Ġfav ored +ag le +Ġcool er +ĠH ours +ĠG U +ĠOrig in +Ġglim pse +---------------- ---- +L im +Ġche ek +Ġj ealous +- ' +Ġhar ness +ĠPo ison +Ġdis abilities +ne apolis +Ġout look +Ġnot ify +ĠIndian apolis +Ġab rupt +ns ic +Ġenc rypted +Ġfor fe +reat h +Ġr abb +Ġfound ations +Ġcompl iment +ĠInter view +ĠS we +Ġad olesc +Ġmon itors +ĠSacrament o +Ġtime ly +Ġcontem pl +Ġposition ed +Ġpost ers +ph ies +iov ascular +v oid +ĠFif th +Ġinvestig ative +OU N +Ġinteg rate +ĠIN C +ish a +ibl ings +ĠRe quest +ĠRodrig uez +Ġsl ides +ĠD X +Ġfemin ism +Ġdat as +Ġb end +ir us +ĠNig eria +F ox +Ch ange +Ġair plane +ĠLad en +Ġpublic ity +ixt y +Ġcommit ments +Ġaggreg ate +Ġdisplay ing +ĠAr row +Ġ12 2 +Ġrespect s +and roid +s ix +ĠSh a +Ġrest oration +) \ +W S +oy s +Ġillust rate +with out +12 6 +ĠâĶ Ĥ +Ġpick up +n els +Ġ .... +f ood +ĠF en +) ? 
+Ġphenomen a +Ġcompan ions +ĠW rite +Ġsp ill +Ġbr idges +ĠUp dated +ĠF o +Ġinsect s +ASH INGTON +Ġsc are +il tr +ĠZh ang +Ġsever ity +Ġind ul +14 9 +ĠCo ffee +Ġnorm s +Ġp ulse +ĠF T +Ġhorr ific +ĠDest roy +ĠJ SON +Ġo live +Ġdiscuss es +R est +E lect +ĠW inn +ĠSurv iv +ĠH ait +S ure +op ed +Ġro oted +ĠS ke +ĠBron ze +Ġl ol +Def ault +Ġcommod ity +red ited +Ġliber tarian +Ġforb idden +Ġgr an +à ¨ +Ġl ag +en z +dri ve +Ġmathemat ics +Ġw ires +Ġcrit ically +Ġcarb ohyd +ĠChance llor +ĠEd die +Ġban ning +ĠF ri +Ġcompl ications +et ric +ĠBangl adesh +Ġband width +St op +ĠOrig inally +Ġhalf way +yn asty +sh ine +Ġt ales +rit ies +av ier +Ġspin ning +ĠWH O +Ġneighbour hood +b ach +Ġcommer ce +ĠS le +B U +Ġentreprene ur +Ġpecul iar +ĠCom ments +f re +3 20 +IC S +Ġimag ery +ĠCan on +ĠElect ronic +sh ort +( ( +D ig +Ġcomm em +u ced +Ġincl ined +ĠSum mon +Ġcl iff +ĠMed iterranean +Ġpo etry +Ġprosper ity +ĠRe ce +Ġp ills +m ember +Ġfin ale +un c +ĠG ig +ä ½ +Ġl od +Ġback ward +- + +ĠFor ward +Ġth ri +s ure +Ġso ap +ĠF X +R ES +ĠSe xual +oul os +Ġfool ish +Ġright eous +Ġco ff +terror ism +ust ain +ot er +Ġab uses +ne xt +Ġab usive +Ġthere after +Ġprohib ition +ĠS UP +Ġd ip +Ġr ipped +Ġinher ited +Ġb ats +st ru +G T +Ġflaw ed +ph abet +Ġf og +do ors +Ġim aging +Ġdig its +ĠHung ary +Ġar rog +Ġteach ings +Ġprotocol s +ĠB anks +à ¸ +p ound +ĠC urt +." ) +. / +Ġex emption +end ix +ĠM ull +Ġimpro ves +ĠG amer +d imensional +I con +ĠMarg aret +St atus +d ates +Ġint ends +Ġdep ict +Ġpark ed +J oe +ĠMar ines +chn ology +! ). +Ġjud ged +Ġwe ights +R ay +Ġapart ments +he ster +Ġrein force +Ġoff ender +occ up +Ġs ore +e pt +ĠPH P +ĠB row +Ġauthor ization +ĠR isk +ĠDel aware +ĠQ U +Ġnot ifications +Ġsun light +Ġex clude +d at +Ġm esh +ĠSud an +Ġbelong ed +Ġsub way +Ġno on +ĠInter ior +ol ics +ĠL akers +Ġc oding +Dis claimer +Cal if +O ld +Ġdis l +???? ? 
+Ġconfir ms +Ġrecruit ment +Ġhom icide +Cons ider +ĠJeff rey +ft y +} ; +Ġobject ion +do ing +ĠLe o +W ant +Ġgl ow +ĠClar ke +ĠNorm an +Ġver ification +Ġpack et +ĠForm ula +Ġpl ag +es ville +Ġshout ing +Ġo v +ĠR EC +ĠB ub +Ġn inth +Ġener g +Ġvalid ity +Ġup s +j ack +Ġneighbor ing +ĠN ec +ew orks +ĠH ab +are z +Ġsp ine +Ġevent ual +ĠLe aders +ĠC arn +Ġprob ation +Ġrom ance +ms g +ĠMechan ical +ER Y +R ock +Ġpart isan +N ode +ass ets +min ent +Ġforeign ers +Ġtest ify +ĠUs ually +l ords +ĠG ren +ĠPow ell +BI L +Ġs r +Ġadd ict +Ġshell s +Ġs igh +ĠY ale +tern ity +Ġ7 50 +E U +ĠR ifle +Ġpat ron +em a +ĠB annon +an ity +Ġtrop ical +ĠV II +c ross +Every thing +ĠIS O +Ġhum ble +ass ing +ĠF IG +Ġupd ating +ys on +Ġcal cium +Ġcompet ent +Ġste ering +Pro t +ĠS Y +ĠFin als +ĠR ug +15 9 +13 7 +ĠG olf +Ġ12 6 +Ġaccommod ation +ĠHug hes +Ġaest hetic +art isan +ĠTw ilight +Ġpr ince +ĠAgric ulture +ĠDis co +Ġpreced ent +Ġtyp ing +author ized +O ption +ĠA ub +l ishes +ach t +m ag +P eter +ĠU FO +mont on +ĠL ith +Ġa rom +Ġsec uring +Ġconf ined +priv ate +Ġsw ords +Ġmark ers +Ġmetab olic +se lect +ĠCur se +ĠO t +g ressive +Ġinc umb +ĠS aga +Ġpr iced +Ġclear ance +Cont ent +Ġdr illing +Ġnot ices +Ġb ourgeois +Ġv est +Ġcook ie +ĠGuard ians +ry s +in yl +Ġ12 4 +Ġpl ausible +on gh +ĠOd in +Ġconcept ion +ĠY uk +ĠBaghd ad +ĠFl ag +Aust ral +ĠI BM +Ġintern ationally +ĠWiki Leaks +I ED +Ġc yn +Ġcho oses +ĠP ill +Ġcomb ining +Ġrad i +ĠMoh ammed +def ense +atch ing +Sub ject +ic iency +Fr ame +Ġ{ " +Ġche ss +Ġtim er +19 0 +Ġt in +Ġord inance +emet ery +Ġacc using +Ġnotice able +Ġcent res +Ġl id +ĠM ills +img ur +Ġz oom +erg ic +Ġcomp ression +pr im +f ind +Ġsur g +Ġp and +ĠK ee +ĠCh ad +cell ence +oy le +Ġsocial ism +ĠT ravis +ĠM Hz +Ġgu ild +ALL Y +ĠSub scribe +ĠRel ated +Ġoccur rence +itch ing +Ġfict ional +Ġcr ush +ĠE A +c od +m ix +ĠTri ple +Ġretrie ve +Ġstimul us +Ġpsych iat +ĠDo or +Ġhomosexual ity +Ġelement ary +Ġcell ular +id ian +ĠL aun +Ġintrig uing +Ġfo am +ĠB ass +id i +its u +Ġass ure +Ġcongr at +Ġbusiness man +ĠBo ost +cl ose +Ġl ied +Ġsc iences +ĠO mega +ĠG raphics +Ġ< = +sp oken +Ġconnect ivity +S aturday +ĠAven gers +Ġto ggle +Ġank le +Ġnational ist +mod el +ĠP ool +ophob ia +V ar +ĠM ons +ator ies +Ġaggress ively +C lear +For ge +act ers +Ġhed ge +Ġpip es +Ġbl unt +Ġs q +Ġremote ly +W ed +as ers +Ġref riger +Ġt iles +Ġresc ued +Ġcompr ised +ins ky +Ġman if +avan augh +Ġprol ifer +Ġal igned +x ml +Ġtri v +Ġcoord ination +ĠP ER +ĠQu ote +13 4 +b f +ĠS aw +Ġtermin ation +Ġ19 0 +Ġadd itions +Ġtri o +Ġproject ions +Ġpositive ly +Ġin clusive +Ġmem br +19 90 +old er +Ġpract iced +ink le +Ar ch +Ġstar ters +ari us +Ġinter mediate +ĠBen ef +ĠK iller +Ġinter ventions +ĠK il +ĠF lying +In v +Ġprem ature +Ġpsych iatric +Ġind ie +Ġcoll ar +ĠRain bow +af i +Ġdis ruption +ĠFO X +cast ing +Ġmis dem +c ro +Ġw ipe +ard on +Ġb ast +ĠTom my +ĠRepresent ative +Ġbell y +ĠP O +ĠBre itbart +13 2 +Ġmess aging +Sh ould +Ref erences +ĠG RE +ist ical +L P +ĠC av +ĠC razy +Ġintu itive +ke eping +ĠM oss +Ġdiscont in +ĠMod ule +Ġun related +ĠPract ice +ĠTrans port +Ġstatist ically +orn s +Ġs ized +p u +Ġca f +ĠWorld s +ĠRod gers +ĠL un +ĠCom ic +l iving +Ġc ared +Ġclim bed +) { +Ġconsist ed +Ġmed ieval +fol k +Ġh acked +Ġd ire +ĠHerm ione +Ġt ended +ce ans +D aniel +w ent +Ġlegisl ators +Ġred es +g ames +Ġg n +am iliar +Ġ+ + +gg y +th reat +Ġmag net +Ġper ceive +Ġz ip +Ġindict ment +Ġcrit ique +g ard +ĠSaf e +ĠC ream +Ġad vent +ob a +Ġv owed +ous ands +Ġsk i +Ġabort ions +u art +Ġstun ned +Ġadv ancing +Ġlack ed +Ġ\ " +Ġsch izophren 
+Ġeleg ant +Ġconf erences +Ġcance led +ĠHud son +ĠHop efully +Ġtr ump +Ġfrequ encies +Ġmet eor +ĠJun ior +ĠFle et +ĠMal colm +ĠT ools +Ġ ........ +Ġh obby +ĠEurope ans +Ġ15 00 +ĠInt o +Ġs way +ĠApp ro +ĠCom pl +Comm unity +Ġt ide +ĠSum mit +ä » +Ġinter vals +ĠE ther +Ġhabit at +ĠSteven s +lish ing +ĠDom ain +Ġtrig gers +Ġch asing +Ġchar m +ĠFl ower +it ored +Ġbless ing +Ġtext ures +F ive +Ġliqu or +R P +F IN +Ġ19 62 +C AR +Un known +Ġres il +ĠL ily +Ġabund ance +Ġpredict able +r ar +Ġbull shit +le en +che t +M or +M uch +ä ¹ +Ġemphas ized +Ġcr ust +Ġprim itive +Ġenjoy able +ĠPict ures +Ġteam mate +pl er +ĠT ol +ĠK ane +Ġsummon ed +th y +ram a +ĠH onda +Ġreal izing +Ġquick er +Ġconcent rate +cle ar +Ġ2 10 +ĠErd ogan +ar is +Ġrespond s +ĠB I +Ġelig ibility +Ġpus hes +ĠId aho +Ġagg rav +Ġru ins +ur ations +Ġb ans +Ġan at +sh are +Ġgr ind +h in +um en +Ġut ilities +ĠYan kees +Ġdat abases +ĠD D +Ġdispl aced +Ġdepend encies +Ġstim ulation +h un +h ouses +ĠP retty +ĠRaven s +ĠTOD AY +Ġassoci ates +Ġthe rape +cl ed +Ġde er +Ġrep airs +rent ice +Ġrecept ors +Ġrem ed +ĠC e +Ġmar riages +Ġball ots +ĠSold ier +Ġhilar ious +op l +13 8 +Ġinherent ly +Ġignor ant +Ġb ounce +ĠE aster +REL ATED +ĠCur rency +E V +ãĥ ŀ +ĠLe ad +Ġdece ased +B rien +ĠMus k +J S +Ġmer ge +heart ed +c reat +m itt +m und +ĠâĢ ĭ +ĠB ag +Ġproject ion +Ġj ava +ĠStand ards +ĠLeon ard +Ġcoc onut +ĠPop ulation +Ġtra ject +Ġimp ly +Ġcur iosity +ĠD B +ĠF resh +ĠP or +Ġheav ier +ne ys +gom ery +Ġdes erved +Ġphr ases +ĠG C +Ġye ast +d esc +De ath +Ġreb oot +Ġmet adata +IC AL +Ġrep ay +ĠInd ependence +Ġsubur ban +ical s +Ġat op +Ġall ocation +gener ation +ĠG ram +Ġmoist ure +Ġp ine +ĠLiber als +Ġa ides +Ġund erest +ĠBer ry +Ġcere mon +3 70 +ast rous +ĠPir ates +Ġt ense +ĠIndust ries +ĠApp eals +ĠN ear +Ġè£ı ç +Ġlo vers +ĠC AP +ĠC raw +Ġg iants +Ġeffic acy +E lement +ĠBeh avior +ĠToy ota +Ġint est +P riv +A I +Ġmaneu ver +Ġperfect ion +Ġb ang +p aper +r ill +Ge orge +b order +in ters +ĠS eth +Ġcl ues +ĠLe vi +ĠRe venue +14 7 +Ġv apor +Ġfortun ate +Ġthreat ens +Ġve t +Ġdepend ency +ers ed +art icle +ĠBl izzard +Ġch lor +Ġmin us +ĠB ills +Ġcryptoc urrency +Ġmetabol ism +ter ing +Ġp estic +step s +ĠTre asure +ract ed +ĠConst ant +Ġtem p +13 9 +ĠDet ective +ur ally +Ġrecover ing +Ġcort ex +Ġ14 4 +cl osed +Ġprejud ice +aun ted +Ġstorm s +ĠN OW +Ġmach inery +Add ress +Ġcompe lled +27 0 +Ġdesp air +b ane +Ġveget able +Ġbed s +Lear n +Ġcolor ful +Ġsp ike +Ġmarg ins +Ġsymp athy +Ġworks hop +ĠC BC +S at +Ġburn s +ĠG ender +Ġ12 9 +ĠC able +Ġdeb ts +ĠThe resa +Ġreflect ing +Ġa irst +Ġr im +ram id +Ġweakness es +W rit +ogg le +t i +ĠCh arge +Ġwe ighed +Ġ( . 
+Ġl aughter +Ġrou ter +ĠDemocr acy +D ear +Ġhas ht +Ġd y +Ġhint s +run ning +Ġfin ishes +ar us +M ass +res ult +asc us +Ġv intage +Ġcon qu +Ġwild ly +ac ist +Ġl ingu +Ġprot agonist +st rom +te enth +ĠSol o +m ac +f illed +Ġre nown +it ives +Ġmot ive +ĠAnt ar +ĠM ann +ĠAd just +Ġrock ets +Ġtrou bling +e i +Ġorgan isms +ass is +Christ ian +Ġ14 5 +ĠH ass +Ġsw all +Ġw ax +ĠSurv ival +V S +ĠM urd +v d +stand ard +Ġdrag ons +Ġacceler ation +r ational +f inal +Ġp aired +ĠE thereum +Ġinterf aces +Ġres ent +Ġartif acts +Å « +are l +Ġcompet itor +ĠNich olas +ĠSur face +c pp +ĠT ot +Ġeconom ically +Ġorgan ised +Ġen forced +in ho +Ġvar ieties +Ġab dom +ĠBa iley +id av +ĠSal v +p aid +Ġalt itude +ess ert +ĠG utenberg +are a +op oulos +Ġprofess ors +igg s +ĠF ate +he y +Ġ3 000 +D ist +Ġtw ins +c ill +ĠM aps +Ġtra ps +Ġwe ed +ĠK iss +Ġy oga +Ġrecip ients +ĠWest minster +Ġpool s +ĠWal mart +18 8 +ĠSchool s +att ack +ĠAR M +par agraph +W arning +j l +Ġself ish +anche z +ĠHe ights +F re +ĠS oph +Ġ -------------------------------- +t ml +33 3 +Ġraid s +Ġsatell ites +KE Y +Ġlast s +Ñ Ĥ +In s +ĠD ame +Ġunp redict +// / +gh ai +Ġart illery +Ġcru ise +Ġg el +ĠCabin et +Ġbl ows +ĠE sp +Ġprox imity +ot he +ĠSk ills +ĠU pper +ob o +ĠN DP +Ġenjoy s +Ġrepe ating +ĠConst ruction +ĠQuest ions +H illary +Ġu int +Ġprocess ors +ĠGib son +ĠMult iple +q a +ĠB om +ĠM iles +vent ional +Ġhur ts +s kin +ĠA IDS +Ġadvis ers +ĠR oot +Ġmethod ology +ĠD ale +Ġdet on +ĠKnow ledge +sequ ently +Ġ12 1 +Ġconnect s +C y +ĠD anger +Ġcontribut ors +ĠB ent +Ġbr ass +ĠGun s +int o +ĠFort une +Ġbro ker +bal ance +Ġlength s +Ġv ic +Ġaver aging +Ġappropri ately +ĠCamer a +Ġsand wich +ĠCD C +Ġcoord inate +Ġnav ig +Ġgood ness +l aim +Ġbra ke +Ġextrem ist +ĠW ake +ĠM end +ĠT iny +ĠC OL +ĠR F +ĠD ual +ĠW ine +C ase +Ġref ined +Ġl amp +L ead +Ġb apt +ĠCar b +ĠS add +ĠMin neapolis +PD F +Ear ly +ĠH idden +I ts +ĠT IME +Ġp ap +Ġcommission ed +ĠF ew +ĠCol ts +ĠB ren +Ġbot hered +Ġlike wise +Ex per +ĠSch w +c ry +n n +ĠM itch +im on +M G +b m +UM P +r ays +Ġregist ry +Ġ2 70 +ach ine +re lla +ant ing +00 000 +Ġru ined +sp ot +Ġt a +Ġmaxim ize +Ġincon ven +D ead +H uman +En abled +ĠMar ie +Ġch ill +ĠParad ise +Ġstar ring +ĠLat ino +ĠProt ocol +ĠE VER +Ġsuppl iers +m essage +ĠBro ck +Ġser um +âĸĪâĸĪ âĸĪâĸĪ +Ġen comp +Ġamb ition +ues e +Ġar rows +And rew +Ġanten na +Ġ19 61 +ĠB ark +Ġb ool +ãĤ ª +ĠSt orage +Ġrail way +Ġtoug her +ĠC ad +Ġwas hing +P y +' ] +em bed +ĠMem phis +ack le +Ġfam ously +ĠF ortunately +ov ies +Ġmind set +Ġsne ak +ĠD h +RA W +ĠSim pson +Ġliv est +Ġland mark +Ġc ement +L ow +Ġthr illed +ĠCour se +in el +Ġch uck +id ate +gl obal +Ġwh it +Ġ � +ad ays +s ki +ĠS V +Ġvir uses +30 6 +ĠResp ons +Ġthe aters +ĠBr anch +ĠGene va +ĠM K +Ġunbel iev +Ġcommun ist +Orig inal +ĠRe ceived +ĠTrans fer +ĠAr g +In put +ĠStr ategy +Ġpal ace +the ning +D ri +Ġsent encing +umbn ail +Ġp ins +re cy +Ġs iblings +Get ting +ĠB U +ĠNorth west +Ġprolong ed +ĠSak ura +C omb +ĠB our +Ġinadequ ate +ĠK ash +Ġus ername +ĠImpro ve +Ġbatt ling +ĠM AC +Ġcurric ulum +Ġs oda +ĠC annon +Ġsens ible +sp ons +De cember +Ġw icked +ĠP engu +Ġdict ators +ĠHe arts +og yn +Ġsimilar ities +ĠSt ats +Ġh ollow +it ations +": [ +Ġh over +ĠList en +s ch +S und +Ġc ad +ĠPar ks +Ġl ur +Ġhy pe +ĠL em +N AME +is ure +Fr iday +Ġshoot s +Ġclos es +Ġd b +ĠR idge +ĠDiff erent +Ġrepl ies +ĠBroad way +op ers +Ġint oler +ĠZe us +akes pe +Ġpropri etary +Ġrequest ing +Ġcontro llers +ĠM IN +im edia +be cca +Ġexp ans +Ġoil s +B ot +ĠCh and +Ġpr inter +Ġto pped +ĠP OL +ĠEar lier +S ocial +av in +Ġdecre ases 
+ĠSe b +Ġspecific ations +ĠBl ast +ĠK urt +Ġfre el +B rown +Ġdil ig +ro e +ĠPro blem +ĠQu ad +Ġdecent ral +ĠV ector +an ut +Ġplug ins +ĠGreg ory +Ġfuck ed +el ines +ĠAmb assador +t ake +Ġcle ans +ong yang +An onymous +st ro +" } +al ine +ĠO dd +ĠE ug +2 16 +Ġbo il +ĠP owers +Ġnurs es +Ob viously +ĠTechn ical +Ġexceed ed +OR S +Ġextrem ists +Ġtr aces +ex pl +Ġcom r +ĠS ach +) / +Ġm asks +Ġsc i +B on +Ġreg ression +we gian +Ġadvis or +it ures +ĠV o +ex ample +ĠInst ruct +Ġs iege +Ġredu ctions +pt r +Ġstat utory +Ġrem oves +Ġp uck +red its +Ġbe e +Ġsal ad +Ġpromot ions +ĠJosh ua +with standing +ET H +ĠCh a +im us +Ġexpend iture +aun ting +Ġdelight ed +Ġ15 5 +be h +Ġcar pet +ĠSp art +Ġj ungle +l ists +Ġbull ying +ĠNob el +ĠGl en +Ġreferen ced +Ġintrodu ces +se in +Ġcho pped +gl ass +ĠW rest +Ġneutral ity +Ġâ Ļ +Ġinvestig ator +Ġshel ves +Ġun constitutional +Ġreprodu ction +Ġmer chant +m ia +Ġmet rics +Ġexplos ives +ĠSon ia +Ġbod ily +Ġthick ness +Ġpredomin antly +ĠAb ility +Ġmon itored +IC H +Ġ] . +ĠMart inez +Ġvis ibility +Ġqu eries +Ġgen ocide +ĠWar fare +Qu ery +Ġstud ios +Ġemb ry +Ġcorrid or +Ġclean ed +com plete +ĠM H +Ġenroll ment +ING S +Ġimpact ed +Ġdis astrous +ĠY un +ĠCl aire +ĠBas ically +y t +uster ity +Ġindirect ly +w ik +Ġd od +ĠCar r +Ġam p +Ġprohib it +ĠIn itial +ĠR d +ij i +Ġeduc ate +c orn +i ott +ĠBeaut y +Ġdetect ive +ĠCon n +s ince +Ġst agger +Ġob ese +Ġb ree +olog ic +is se +walk er +Ġbl ades +Ġlaw ful +fun c +ĠBeh ind +Ġappet ite +Ġ( * +Ġt ennis +Ġoff spring +Ġj ets +Ġstruct ured +Ġafore mentioned +N ov +Ġsc aling +f ill +Ġst ew +Ġcur b +ĠStep han +ed In +S F +ob ic +é ŃĶ +ou g +ĠM M +Ġgen etically +ope z +13 6 +Ġu mb +anc ers +Ġcoh ort +Ġmerch andise +Ġimp osing +ĠLegisl ature +ĠArch ive +iv ia +ĠN aval +Ġoff ences +Ġmir acle +Ġsn apped +Ġf oes +Ġextensive ly +ĠR af +Ġc ater +ed ience +K it +ĠB in +Ġrecomm ends +ĠC ities +Ġrig id +ĠRE AD +ĠNob le +ĠT ian +Ġcertific ates +ant is +o iler +ĠBudd hist +d id +Ġsurvey ed +Ġdown ward +Ġprint s +ĠMot ion +ron ics +ĠS ans +oss ibly +u ctions +Ġcolon ies +ĠDan ish +un it +Ġsp oil +Ġadvis ory +ber ries +Pl an +Ġspecific ation +op hers +ĠRes ource +Ġsh irts +prising ly +commun ications +Ġtriv ial +Ġmention ing +ise xual +Ġsupp lements +Ġsuper vision +B P +v or +Ġw it +Ġco oldown +Ġplaint iff +ĠReview s +ĠS ri +ĠM int +ĠSug ar +Ġafter ward +ĠPri est +ĠInvest ment +og ene +ĠT aking +Ġstretch ing +Ġinflamm ation +ĠTe hran +Ġl ining +Ġfree zing +ĠEnt ity +Ġins piring +spe cial +pr ice +Ġsu e +ĠP orter +oun ge +ET A +ĠD erek +ĠLu is +u o +ym ph +Ġex terior +ih il +ĠAsh ley +in ator +Ġnut rients +ĠTh rones +Ġfin ances +ĠIn spect +Ġspe cially +ĠRequ ired +ĠP TS +ĠViol ence +oint ed +sh ots +Ġex cerpt +co on +IN S +ĠG ri +Ġrecogn ised +We ek +You ng +Ġv om +is le +ĠCur ry +ĠBudd h +Ġnot ebook +Ġd urable +/ ? 
+ĠG ad +ĠP upp +Ġforg ive +p ark +Ġpersonal ities +an alysis +cl amation +Ġelev ator +Ġware house +ĠR ole +un n +Ġillust ration +ĠSc an +Ġatmosp heric +Im port +AN C +rict ed +f u +01 0 +Ġar che +Ġreward ed +akespe are +Ġintern ally +ĠR BI +alk er +Ġeleph ant +ow itz +ĠP izza +Ġbip artisan +é s +Ġslow ed +ĠSt ark +Ġover ride +OU S +Ġ3 20 +undred s +ĠDe ck +ĠC ensus +be e +14 6 +ot or +Ġ ip +Ġu b +oc ations +ĠBut ton +r ice +Ġc ripp +ff f +Ġorig inated +Ġoverwhel med +app a +Ġfore most +âĢ ij +ĠL EG +re lease +eat ured +at ches +Ġre ps +Ġl ending +ĠRe ference +ĠCl ient +16 5 +vent h +Com plete +ĠPat rol +Ġsw orn +c am +Ġshut tle +ĠR alph +Ġh ometown +- , +on al +ĠB P +å ı +Ġpersu ade +ĠAlex and +Ġcomb ines +Ġv ivid +ĠL ag +Ġenc oding +Ġsal vation +w en +ĠRec overy +i ya +Un iversity +ĠB iden +Ġbud gets +ĠTex ans +f its +Ġhon ored +Ġp ython +T D +## # +cl one +Ġbl ink +ĠL iquid +Ġunemploy ed +Ġcl ashes +ĠCoun sel +Ġdirect ing +Ġpun ct +ĠFal cons +Ġsh ark +ĠDam ascus +Ġje ans +Ġemb ark +Ġse ize +Ġup wards +2 80 +ĠE z +ĠAny thing +Ġex otic +l ower +ĠCreat or +ĠU m +Ġsubur bs +ber ger +ĠW end +Ġm int +ĠX X +ĠD ro +Ġsuff ers +Ġher b +t ree +Ġfrag ile +Ġflood ed +ĠAl cohol +ole an +ny der +ĠK O +F ram +Ġ13 6 +Ġow ed +ĠMe lee +ĠH ash +Ġwh isk +Ġsu do +r r +Qu ick +app ro +Ġi i +ĠEx amples +he e +Ġpromot es +per ature +k ar +ĠHon or +Ġs odium +ĠL if +ros so +intend ent +Ġcorrespond ent +F ound +sec ret +Ġident ifies +ag ne +Ġl ou +ĠP P +Ġcoinc idence +m ove +Ġmilit ia +Ġinf iltr +ĠPrim ary +Ġpitch ing +ĠI b +ĠGO OD +ãĤ ¸ +ĠW izards +ir al +ĠVen us +R R +ĠâĢ ķ +ĠCase y +Ġsad ly +Ġadm ire +Ġembarrass ed +c b +M el +Ġtub es +Ġbeaut ifully +ĠQueens land +Bel ow +re z +qu et +ple asant +Ġ « +C amp +Ġdec isive +19 98 +ĠL amb +ut ton +h n +ĠJ agu +au nder +ĠC ord +Ġcl erk +Ġca ffe +Ġwip ed +Ġre im +ĠMount ains +Ġimprison ed +Ġdevelop s +ĠP ra +Ġmodel ing +Any one +ance l +ĠS it +Ġshield s +Ġl awn +Ġcard iovascular +Ġdemonstr ating +Ġpar se +ĠIsrael is +Ġeuro s +14 3 +Ġgl orious +ins ki +ec d +Ġcondition ing +Ġhel pless +Ġmicro sc +ĠHar bor +Ġst akes +Ġ2 60 +Ġun equ +ĠFl oyd +Ġd amp +Ġappar atus +ĠLaw s +Ġcoun ters +Ġindu ce +at able +ĠAh med +Ġsl am +N ovember +Ġpers ist +Ġim minent +á n +Ġsh red +Ġph ases +ĠEd monton +ĠArm strong +ĠMe et +ĠK itty +Ñ Ģ +c irc +ĠAd ult +Ġa rose +ĠX en +D an +g ow +Ġsuper f +ĠAd mir +Ġend ure +Ġkey word +yr us +Ġy arn +Ġpath way +ĠHop kins +mid t +Ġcens orship +d ependent +Ġinstruct or +S ources +Ġto e +Ġball oon +N ob +Ġsw ear +ĠCast ro +Ġgl oss +ĠK avanaugh +Ġremark ably +Ph otos +ĠN om +ĠS outheast +y ers +Ġvalid ation +Ġcann on +ĠVict ory +ĠPier re +Ġcaut ious +Aud io +Ġf etch +ĠG ift +ĠH yp +Ġrem edy +Z E +Ġsc ent +Ġbe ard +ĠR ut +- " +Ġpat ents +H y +Ġun just +Ġpot ato +Ġforth coming +Ġche f +ĠR ift +aff e +ĠR OM +ĠL aunch +Ġp ads +ĠNe o +Ġon set +Ġsquee ze +s afe +Ġpref ix +ĠT M +ĠN early +ĠClin ical +ĠM ental +ot iation +ĠUn ic +ant ry +ĠC ir +Ġep it +à ¦ +Ġextract ed +verse ly +ri ad +Ġstr ains +Ġto ps +Ġpo em +ĠRand y +ĠMap le +TH ER +up iter +ĠSS D +ļ é +Ġun con +per ing +Ġsle pt +in ers +Ġunder water +ĠEv idence +g one +20 5 +Ġhistor ians +Ġsynt hesis +Ġf rog +b asketball +Ġvibr ant +Ġsub ord +Ġ3 65 +ĠD ial +Ġcooper ate +HA HA +Ġgreet ed +15 8 +Ġj azz +Ġinto x +ĠWalk ing +Ġsuper visor +ĠF usion +ĠMer cedes +s end +H am +s d +n l +Ġtour s +ĠF IFA +Ġcul p +g d +30 4 +Ġple as +Ġillust rates +ĠColomb ia +Ġhighlight ing +ĠSum mary +Ġexp osing +ĠD ru +Ġir ony +r itional +ĠCar roll +ĠEll is +P ict +ĠR apt +Ġad apter +Ġun m +Ġcor pse +Ġceleb rities +D en +at um +ĠAp 
ocalypse +ĠW ag +lin ing +Ġhorm ones +R ub +ĠX i +ĠV aults +20 8 +alky rie +inos aur +Ġfeed s +v ity +Ġdefe ating +W ait +Ġemphas ize +ĠSteel ers +yr inth +le ys +ĠWhe never +Current ly +ĠCl ock +Ġcollect ively +any on +ĠJ P +Ġment ality +Ġdownload s +Ġsurround ings +ĠBarn es +Ġflags hip +Ġindic ators +Ġgra pp +Jan uary +ĠElement al +ĠAthen a +ib al +Ġs ights +Ġcap ita +ĠTreat y +Ġvo iced +ĠG az +let te +Ġy a +Ġexp ired +Leg end +H ot +n ature +Ġunst able +Ġ2 80 +à º +Com ment +AL E +Ġquest s +Ġhand ler +n is +Ġvers atile +Ġconce al +enge ance +ĠInter active +Ġobs essed +ĠDog s +Ġcr acked +S ound +s v +ĠD ylan +ro ads +f x +ĠCath olics +ĠH ag +Ġsl ammed +Ġgl owing +s ale +Ġtiss ues +ĠCh i +ne e +Ġc her +s ic +ur rection +Ġb acon +ul atory +) ." +Ġir regular +FOR M +ass ed +Ġintention al +Ġcompens ate +ĠSpe aking +ĠS ets +15 3 +Ġconvent ions +b ands +em ade +Ġe cc +ĠWin ston +ĠAssass in +ĠBelg ian +Ġdepend ence +Ġnic he +Ġb ark +ĠJ azz +Ġdisadvant age +Ġgas oline +Ġ16 5 +çļ Ħ +ess a +mod ule +ang ular +O Y +ĠTreat ment +it as +ol ation +ĠArn old +Ġfe ud +ĠN est +Ġthe atre +ew ater +Ġmin ors +olic y +ĠH aven +div ision +Ġtr unk +F ar +ĠP ull +Ġcapt uring +Ġ18 00 +ĠTe en +Ġex empl +Ġclin ics +ĠB urg +Ġsubst it +Ġpay load +ĠL av +ĠT roy +ĠW itness +Ġfrag ments +Ġpass words +Ġg ospel +ĠG in +Ġten ants +ol ith +S ix +Pre vious +ĠAg es +ĠDar win +Ġbl at +Ġem pathy +sm ith +b ag +ĠE cho +ĠC amb +ĠM add +ĠB oo +Ġred e +ĠBurn ing +Ġsmooth ly +ĠAd rian +ĠV ampire +ĠMon sters +ste am +Sty le +M a +re a +ĠD war +aly st +urs or +Ġelim ination +Ġcrypt o +ch t +ĠE ternal +âĢ¦ ] +ĠS orce +I ll +N ER +Ġu h +Con clusion +w age +Ġresp ir +Ġrem inis +het ical +Ġg y +Ġutil ized +ic idal +Ġ19 00 +Ġhun ters +ĠSw an +ĠRe act +Ġvis itor +ĠThanks giving +30 8 +Post s +Ġh ips +19 97 +om ers +Ġkn ocking +ĠVeh icle +Ġt il +Ġ13 8 +Ġm i +ĠInvest igation +ĠKen ya +Ġcas ino +Ġmot ives +Ġreg ain +re x +Ġweek ends +Ġstab bed +bor o +Ġexplo ited +ĠHA VE +ĠTe levision +c ock +Ġprepar ations +Ġende av +ĠRem ote +ĠM aker +ĠPro du +ĠEv an +Ġinform ational +ĠLouis ville +15 4 +ĠDream s +Ġpl ots +ĠRun ner +Ġhur ting +Ġacad emy +ĠMont gomery +n m +ĠL anc +ĠAl z +2 10 +el ong +Ġretail er +Ġar ising +Ġrebell ion +Ġbl onde +play ed +Ġinstrument al +C ross +Ġret ention +Ġtherape utic +Ġse as +Ġinfant ry +ĠCl int +Ġprompt ing +Ġbit ch +Ġst ems +ĠK ra +Ġthe sis +ĠB og +ru ed +Ġk ings +Ġcl ay +ific ent +ĠY ES +ĠTh ing +ĠCub s +vey ard +els h +in arily +ĠE y +ĠRoll ing +Ġev olving +Ind ia +Ġrecogn izes +Ġgrad uation +is ers +Ġfert ility +ĠMil an +Comm and +Ġbox ing +Ġ19 43 +Ġgl uten +ĠEm ir +Ġid ol +Ġcon ceived +ĠCre ation +Mer it +udd y +uss ions +ĠLie utenant +iet al +Ġunch anged +ĠSc ale +ĠCrime a +ball s +ator ial +Ġdepth s +Ġempir ical +Ġtrans m +Ġuns afe +miss ible +com fort +15 6 +Ġmechan ic +00 2 +l ins +Ġsm oked +P os +Ġslow ing +Ġl av +Tex as +Ġche ating +ĠMet ropolitan +eth yl +Ġdiscover ing +as se +Ġpen cil +ĠPy ongyang +Ġclos et +ĠShe et +ĠEnt ry +ou stic +Ġmy st +er ate +ari at +Ġminer als +Ġmusic ian +ĠP ul +ĠM az +24 9 +Ġper missions +Ġ iv +en ary +ick ers +ĠB ing +he a +en able +Ġgri ev +Ġassert ed +ĠColon el +Ġaff idav +w o +Ġse ated +ĠR ide +Ġpaint ings +ĠP ix +Ġ13 7 +ish i +umb ai +g otten +ĠEar l +Ġin ning +Ġc ensus +Ġtrave lled +ĠCons ult +18 5 +b ind +Ġsimpl icity +Ġoverlook ed +ĠHelp ful +Ġmon key +Ġoverwhelming ly +Bl ood +ĠFl int +ĠJ ama +ĠPres ent +ĠR age +ĠT A +pt ive +Ġturn out +w ald +ĠD olphins +ĠV PN +Ġon ion +Ġcraft ing +m ma +ĠMerc ury +Ġarr ange +Ġalert s +ĠO T +zb ollah +Ġg ases +ĠRichards on +s al +l ar 
+Ġfro st +Ġlower ing +Ġacc laim +Ġstart ups +ĠG ain +ess ment +Ġguard ian +äº º +ĠP ie +ĠL inks +Ġmer its +Ġaw ake +Ġparent al +Ġexceed s +Ġid le +ĠPil ot +Ġe Bay +ĠAc cept +ipe g +C am +ĠK ot +Ġtrad ers +olit ics +unk er +ĠP ale +os i +an mar +Ġ19 47 +ĠF ell +est ial +it ating +G F +ĠS r +if ted +Ġconnect or +ĠB one +ill es +2 60 +h ma +Ġoverl ap +ĠGit Hub +Ġclean er +ĠBapt ist +ĠW AS +Ġlung s +Ñ ģ +ĠB UT +Ġc ite +Ġpit ched +reat ment +Ġtro phies +ĠN u +38 6 +ĠPr ide +Ġattend ees +[ ] +17 9 +Ġspat ial +Ġpri zes +ĠRel igion +Ġshow case +ĠC ategory +vid ia +T arget +Pro perty +? , +Ġf usion +p ie +ĠU CLA +Ġsound track +Ġprin cess +ĠC aval +sh ould +Ġlim bs +Back ground +Ġlone ly +Ġc ores +ĠT ail +she et +Ġ13 2 +R a +ãĤ « +ĠB olt +Ġbook ed +Ġadmin ister +Ġequ als +w y +Ġobserv ing +ĠBar on +ĠAd obe +Ġv irgin +ĠSocial ist +M ove +gh azi +ĠLind a +2 12 +Ġbre wing +Ġmerch ants +bur se +Ġdiv or +Ġmet als +ĠN er +Ġsum s +ĠEn emy +Ġen vision +Ġgrant ing +ĠH oney +ĠSk yrim +Ġsoc io +gr aded +Ġselect ive +W ASHINGTON +Ġ19 48 +ĠSir ius +ĠG ross +act ivity +ĠI van +Ġfur ious +BS D +ĠPre vious +Ġrespons ive +Ġchar itable +Ġle aning +ĠP ew +Ġviol ates +\\\\ \\\\ +ĠCom ing +w ire +Ġpo et +Ġres olutions +comm and +ĠPortug uese +Ġnick name +Ġde af +Feb ruary +Ġrecogn ise +Ġentire ty +Ġseason al +pl aced +ĠTe legraph +Ġmicro phone +our ing +Ġgr ains +Ġgovern ed +Ġpost p +ĠW aters +in ement +Ġund ocumented +ĠCom cast +Ġf ox +Ġassault s +re on +man y +ĠJen kins +ĠAny way +Ġassess ments +Ġdown s +ĠM ouse +Ġsuper b +k t +ĠD ow +Ġtax ation +4 01 +Ġsm iles +Ġundert aken +Ġex h +Ġenthusi astic +Ġtw ent +Ġgovernment al +Ġautonom y +ĠTechn ologies +ĠCh ain +Ġpreval ent +f b +Ġnic otine +og ram +j ob +Ġawa iting +ĠMen u +Ġdep uties +k ov +ish ops +But ton +ĠShan ghai +Ġdies el +ĠD uck +R yan +ĠPC s +N F +j ury +ent e +Ġinacc urate +edd y +Wh atever +Ġshow c +ĠN ad +od us +et r +Ġplaint iffs +ĠW OR +ĠAss ange +Ġpriv at +Ġpremium s +Ġt am +UR L +Ġel ites +ĠR anger +otten ham +ĠH off +ĠAt hens +Ġdefin ite +Ġs ighed +Ġeven ly +2 11 +ĠAm ber +ak ia +Ġmail ing +Ġcr ashing +ĠConfeder ate +ru gged +W al +ĠDep ths +Ġjuven ile +Ġreact or +Introdu ction +ĠDel uxe +19 95 +ĠS anchez +ĠM ead +iv able +: - +ĠPlan ning +ĠT rap +qu in +ĠProt ect +ve red +In formation +Ġkid ney +inn amon +l as +Ġpolic ing +Ġtoler ate +ĠQ i +Ġbi ased +F ort +ĠK i +s ave +Ġprivile ged +Ġbe asts +ĠGl as +ĠC inem +Ġcome back +Sund ay +Ġext inction +h ops +Ġtrans mit +Ġdoub les +ĠFl at +16 7 +Ġdis puted +Ġinjust ice +f oo +V ict +role um +ĠJul ie +Con text +ĠR arity +iss ue +Comp onent +Ġcounsel ing +an ne +d ark +Ġobject ions +u ilt +Ġg ast +Ġpl ac +Ġun used +ãĥ ĩ +ĠT rial +ĠJ as +hed ral +ob b +Ġtempor al +ĠPR O +ĠN W +ĠAnn iversary +L arge +Ġther m +Ġd avid +Ġsystem ic +ĠSh ir +m ut +ĠNe pt +add ress +Ġscan ning +Ġunderstand able +Ġcan vas +C at +ĠZ oo +Ġang els +L O +ĠStat ement +ĠS ig +ov able +ĠA way +sh aring +ocr ats +st ated +Ġweigh ing +N or +w ild +B ey +Ġaston ishing +ĠReyn olds +Ġop ener +Ġtrain er +Ġsurg ical +p n +Ġadjust ing +whe el +Ġf rown +erv ative +Ġsusp end +With in +te in +Ġobst acle +Ġliber ties +ym es +Ġur anium +ans om +an ol +ub a +ĠL oss +Ġa rous +ĠHend erson +W ow +s pl +c ur +ĠÂ Ń +Ġtheir s +Dam age +Ġdownload ing +Ġdisc ern +ĠSt o +ĠFl a +Ġh ath +ĠA j +Ġun pleasant +Europe an +exp ensive +Ġscreens hot +ĠU V +Ġall ied +ĠPers ian +Ġmonop oly +Ġat om +ĠReds kins +"> < +Ġcan cell +Ġcinem a +13 1 +f air +ĠAlf red +Ġd uck +arg s +22 3 +ĠIS I +Ġsign aling +in ar +Ġlaugh s +Ġfor wards +Ġreck less +Ġlisten ers +at ivity +Ġvast ly +n 
ant +L ess +ĠHun ting +ĠScient ific +IT ED +Ġkn ight +ĠH TC +us a +t mp +Ġr ude +ĠLegend ary +Ġar ises +B ad +ĠCl aim +pe g +Ġreal ities +Th ink +Ġ ° +Ġro de +Ġstri ve +Ġan ecd +Ġshort s +Ġhypot hes +Ġcoord inated +ĠGand hi +ĠF PS +R ED +Ġsuscept ible +Ġshr ink +ĠCh art +Hel p +Ġ ion +de ep +rib es +ĠK ai +ĠCustom er +Sum mary +Ġc ough +w ife +Ġl end +Ġposition ing +Ġlot tery +ĠC anyon +Ġf ade +Ġbron ze +ĠKenn y +Ġbo asts +ĠEnh anced +rec ord +Ġemer gence +Ġa kin +ĠB ert +it ous +âĸ ij +Ġst ip +Ġexch anged +om ore +als h +Ġreserv oir +Ġstand point +W M +Ġiniti ate +Ġdec ay +Ġbrew ery +Ġter ribly +Ġmort al +lev ard +Ġrev is +N I +el o +Ġconf ess +ĠMS NBC +Ġsub missions +Cont roller +Ġ20 2 +ĠR uth +} ); +ĠAz ure +Ġ ." +20 6 +ĠMarket ing +Ġl aund +ien cies +Ġrenown ed +ĠT rou +ĠN GO +ble ms +Ġterr ified +Ġwar ns +Ġper t +Ġuns ure +4 80 +ale z +ult z +ĠOut side +Ġst yl +ĠUnder ground +Ġp anc +Ġd ictionary +Ġf oe +rim inal +ĠNor wegian +Ġj ailed +Ġm aternal +é e +ĠLu cy +c op +Ch o +Ġuns igned +ĠZe lda +ĠIns ider +ĠContin ued +Ġ13 3 +ĠNar uto +ĠMajor ity +16 9 +ĠW o +ãĤ ĵ +Ġpast or +Ġinform al +Ð ½ +an throp +jo in +ãģ Ĺ +it ational +N P +ĠWrit ing +f n +ĠB ever +19 5 +Ġy elling +Ġdr astically +Ġe ject +Ġne ut +Ġth rive +ĠFre qu +ou x +Ġpossess es +ĠSen ators +ĠD ES +ĠSh akespeare +ĠFran co +ĠL B +uch i +Ġinc arn +Ġfound ers +F unction +Ġbright ness +ĠB T +Ġwh ale +ĠThe ater +m ass +ĠD oll +S omething +Ġecho ed +ĠHe x +c rit +af ia +Ġgodd ess +Ġele ven +ĠPre view +ĠAur ora +Ġ4 01 +uls ive +ĠLog an +in burgh +ĠCent ers +ĠON LY +ĠA id +Ġparad ox +Ġh urd +ĠL C +D ue +c ourt +Ġoff ended +Ġeval uating +ĠMatthew s +Ġto mb +Ġpay roll +Ġextra ction +ĠH ands +if i +Ġsuper natural +ĠCOM M +] = +dog s +Ġ5 12 +ĠMe eting +Rich ard +ĠMax imum +Ġide als +Th ings +m and +ĠReg ardless +Ġhum ili +b uffer +L ittle +ĠD ani +ĠN ak +Ġliber ation +ĠA be +ĠO L +Ġstuff ed +ac a +ind a +raph ic +Ġmos qu +Ġcampaign ing +Ġoccup y +S qu +r ina +ĠW el +ĠV S +Ġphys ic +Ġp uls +r int +oad ed +ET F +ĠArch ives +Ġven ues +h ner +ĠTur bo +Ġl ust +Ġappeal ed +que z +il ib +ĠTim othy +Ġo mn +d ro +Ġobs ession +ĠSav age +19 96 +Gl obal +J es +2 14 +Ġsl iding +Ġdisapp ro +ĠMag ical +Ġvolunt arily +g b +ane y +Ġprop het +ĠRe in +ĠJul ia +ĠW orth +aur us +Ġb ounds +ie u +)) ) +Ġcro re +ĠCitiz en +S ky +Ġcolumn ist +Ġseek ers +ond o +IS A +ĠL ength +Ġnost alg +Ġnew com +Ġdet rim +ent ric +3 75 +ĠG E +Ġaut op +Ġacadem ics +App Data +ĠS hen +Ġid iot +ĠTrans it +Ġteasp oon +W il +K O +ĠCom edy +> , +Ġpop ulated +W D +Ġp igs +ĠO culus +Ġsymp athetic +Ġmar athon +19 8 +Ġseiz ure +s ided +Ġd op +irt ual +L and +ĠFl oor +osa urs +... 
] +Ġl os +Ġsubsid iary +E Y +ĠPart s +ĠSt ef +ĠJud iciary +Ġ13 4 +Ġmir rors +Ġk et +t imes +Ġneuro log +Ġc av +ĠGu est +Ġtum or +sc ill +ĠLl oyd +E st +Ġcle arer +Ġstere otypes +Ġd ur +not hing +Red dit +Ġnegoti ated +---------------- -------- +23 5 +Ġfl own +ĠSe oul +ĠRes ident +ĠS CH +Ġdisappear ance +ĠV ince +g rown +Ġgrab s +r il +ĠInf inite +ĠTw enty +Ġpedest rian +Ġjer sey +ĠF ur +ĠInf inity +ĠEll iott +Ġment or +Ġmor ally +Ġob ey +sec ure +iff e +Ġantib iotics +ang led +ĠFre eman +ĠIntrodu ction +J un +Ġm arsh +ic ans +ĠEV ENTS +och ond +W all +icult y +Ġmisdem eanor +Ġl y +Th omas +ĠRes olution +Ġanim ations +ĠD ry +Ġinter course +ĠNew castle +ĠH og +ĠEqu ipment +17 7 +Ġterrit orial +Ġarch ives +20 3 +Fil ter +ĠMun ich +Ġcommand ed +ĠW and +Ġpit ches +ĠCro at +Ġrat ios +ĠM its +Ġaccum ulated +ĠSpecific ally +Ġgentle man +acer b +Ġp enn +Ġa ka +ĠF uk +Ġinterven e +ĠRef uge +ĠAlz heimer +Ġsuccess ion +oh an +d oes +L ord +Ġsepar at +Ġcorrespond ence +Ġsh iny +P rior +Ġs ulf +Ġmiser able +Ġded ication +( ). +Ġspecial ists +Ġdefect s +ĠC ult +ĠX ia +Ġje opard +ĠO re +Ab ility +Ġle ar +Ġamb itions +ĠB MI +ĠArab s +Ġ19 42 +Ġpres ervation +ific ate +Ġash amed +l oss +ĠRest aur +Ġrese mble +Ġen rich +ĠK N +ĠCl an +fl oat +Ġplay able +IT T +Ġharm ony +arr ison +ĠWe instein +w ere +Ġpoison ing +ĠCom put +ĠWord Press +m ajor +ĠVal ve +F an +ĠTh row +ĠRom ans +ĠDep ression +ad os +Ġtort ured +Ġbal ancing +bott om +Ġacqu iring +ĠMon te +ard i +Ġa ura +Ġ# # +ĠStand ing +ĠAtl as +C F +Ġintr ins +ĠBen ghazi +Ġcamp ing +Ġt apped +bl ade +st rous +ĠR abb +ĠW ritten +t ip +ĠNe igh +ster dam +ĠAll ow +ĠHe aling +ĠR hod +n um +Ġcaffe ine +ĠPer cent +Ġbo o +Ġapp les +30 5 +Ġwel coming +Ġappl aud +Ġa usterity + ± +ĠRe ality +ef e +å ® +Ġsu cks +Ġtab s +ĠPay Pal +Ġback pack +Ġgif ted +abul ary +ĠSc out +ir teen +Ġch in +Ġo mitted +Ġnegative ly +Ġaccess ing +ĠE arn +Ġambul ance +Ġhead phones +Ġ20 5 +ĠRef resh +p resident +ĠKit chen +ĠEnt ered +ĠS nyder +00 5 +om ical +Ġborrow ed +ĠN em +Ġav iation +Ġst all +rim ination +Ġuniform s +it ime +ĠSim mons +ener gy +ab lished +y y +qual ified +Ġrall ies +ĠSt uart +fl ight +Ġgang s +r ag +Ġv ault +lu x +ĠCom par +Ġdesign ation +20 9 +ĠJ os +d ollar +z ero +Ġwell s +30 3 +Ġconstitu ents +Ġhe ck +Ġc ows +Ġcommand ers +Ġdifferent ial +ĠC atherine +29 9 +Ġval ve +Ġbr ace +Ġperspect ives +c ert +f act +icular ly +ĠMc N +pl anes +Ġint ric +Ġpe as +ov an +Ġtoss ed +ret ch +ĠL opez +Ġunf amiliar +de ath +ĠA part +ĠCh ang +Ġrelie ved +rop he +Ġair ports +Ġfre ak +ut il +M ill +ĠCh in +ĠOw en +m ale +ĠBro ken +ĠWind s +ro b +r ising +Ġfire fighters +Ġauthor itarian +Ġ14 8 +Bit coin +ex ternal +Ġbrow sers +iche ver +or ian +Ġun b +Ġpo ke +ĠZ ot +M id +ĠPop ular +Ġco vert +Ġcont ributes +Ġ6 50 +Ġcont ention +G ate +Ġcons oles +Ġchrom os +ĠI X +Ġvis ually +ĠE isen +Ġjewel ry +Ġdeleg ation +Ġacceler ate +ĠR iley +Ġsl ope +Ġind oor +it ially +Ġhuge ly +Ġtun nels +Ġfin ed +Ġdirect ive +Ġfore head +ustom ed +Ġsk ate +Mus ic +g as +Ġrecogn izing +am bo +Ġover weight +ĠGr ade +Ù Ĭ +Ġsound ing +Ġlock ing +ĠR EM +St ore +Ġexc av +ĠLike wise +ĠL ights +Ġel bow +ĠSupp ly +w ic +Ġhands ome +19 94 +C oll +Ġadequ ately +ĠAssoci ate +Ġstri ps +Ġcrack down +Ġmar vel +ĠK un +Ġpass ages +@@ @@ +ĠT all +Ġthought ful +names e +Ġprost itution +bus iness +Ġball istic +person al +c ig +iz ational +R ound +ĠÂłĠÂł ĠÂłĠÂł +ĠCole man +Ġadm itting +ĠPl ug +Ġbit coins +ĠSu z +Ġfair ness +Ġsupp lier +Ġcatast rophic +ĠHel en +o qu +M arc +ĠArt icles +g ie +Ġend angered +Ġdest iny +ĠVol t +ol ia +ax is 
+Ġche at +Ġun ified +IC O +qu ote +30 2 +ĠS ed +Ġsupp ression +Ġanaly zing +Ġsqu at +Ġfig uring +Ġcoordin ates +Ġch unks +Ġ19 46 +Ġsub p +Ġw iki +ĠFor bes +ĠJ upiter +ĠE rik +im er +ĠCom mercial +\ ) +Ġlegitim acy +Ġd ental +ĠMe an +Ġdefic its +5 50 +Orig inally +ĠHor ror +Ġcontam ination +ll ah +Ġconf isc +ĠCl are +T B +ĠF ailed +an ed +Ġrul er +ĠCont roller +Ġfemin ists +F ix +g ay +20 7 +Ġr abbit +Th ird +ownt own +Ġgl ue +Ġvol atile +Ġsh ining +Ġf oll +Ġimp aired +Ġsup ers +æ Ī +Ġcl utch +ļé ĨĴ +Ġpro let +Ġ( ! +Ġy elled +ĠK iev +ĠEr n +ĠSh ock +K B +Ġsit uated +qu ery +ĠN as +Ġan nex +char acter +ĠHol iday +Ġautom ation +ĠJ ill +ĠRem astered +Ġl inem +Ġwild erness +ĠHor izon +ĠGu inea +A Z +Ġmain land +Ġsec recy +LE ASE +Ġp unk +ĠProv ince +( ), +Spe ed +Ġhand ing +ĠSeb ast +S ir +r ase +Ġj ournals +Ġcon gest +ĠT ut +ir rel +Ġschizophren ia +Ġmis ogyn +health y +I ron +Ġreact ed +- $ +25 2 +Ġpl ural +Ġpl um +Ġbarg ain +Ġground ed +f inder +Ġdis se +ĠL az +O OD +Ġat roc +F actory +Ġmin ions +Ġo ri +ĠB rave +ĠP RE +ĠMy anmar +ĠH od +Ġexped ition +Ġexpl ode +ĠCo ord +Ġext r +ĠB rief +ĠAD HD +Ġhard core +feed ing +Ġd ile +ĠF ruit +Ġvacc ination +ĠM ao +osp here +Ġcont ests +- | +Ġf ren +isp here +R om +ĠSh arp +ĠTre nd +Ġdis connect +âĢ¢ âĢ¢ +Ġper secution +Ear th +Ġhealth ier +38 4 +Ġc ob +ĠTr inity +OW S +AN N +Ġspecial ty +Ġg ru +Ġcooper ative +wh y +Start ing +ĠIss ues +st re +ens or +Ġ18 5 +Ad v +! ? +ĠRe vel +em ia +ĠH ulk +Ġcelebr ations +ĠS ou +ra ud +ĠKle in +Ġun real +con text +Ġpartners hips +Ġadop ting +t ical +Ġspl ash +ĠHe zbollah +c ategory +cycl op +xt on +ĠD ot +urd y +t z +Ġenvelop e +ĠN L +â ķ +Ġwhere in +Spe c +18 4 +Ġte lev +al iation +Ġmyth s +å ° +Ġrig orous +Ġcommun icating +Ġobser ver +Ġre he +ĠW ash +Ġapolog ized +ĠT in +Ġexpend itures +work ers +d ocument +Ġhes itate +ĠLen in +Ġunpredict able +Ġrenew al +cl er +ok ia +ĠCON T +Ġpost season +Tok ens +Ġex acerb +Ġbet ting +Ġ14 7 +Ġelev ation +W ood +ĠSol omon +19 4 +00 4 +out put +Ġredu nd +ĠM umbai +Ġp H +Ġreprodu ce +ĠD uration +MA X +Ġb og +C BS +ĠBal ance +ĠS gt +ĠRec ent +Ġc d +Ġpo pped +Ġincomp et +pro p +ay an +g uy +Pac ific +Ġty r +Ġ{ { +ĠMy stic +ĠD ana +Ġmast urb +Ġge ometry +à ¢ +ĠCor rect +Ġtraject ory +Ġdistract ed +Ġf oo +ĠW elsh +L uc +m ith +Ġrug by +Ġrespir atory +Ġtri angle +Ġ2 15 +Ġunder graduate +ĠSuper ior +ch anging +_ - +Ġright ly +Ġrefere e +Ġluc rative +Ġun authorized +Ġresemb les +ĠGN U +ĠDer by +Ġpath ways +ĠL ed +Ġend urance +Ġst int +Ġcollect or +F ast +Ġd ots +Ġnational s +ĠSec urities +Ġwh ip +Par am +Ġlearn s +M agic +Ġdetail ing +m oon +Ġbroadcast ing +Ġb aked +26 5 +hol m +ĠS ah +ĠHus sein +ĠCourt esy +17 4 +Ġ14 6 +Ġge ographic +pe ace +Ġjud ging +ĠS tern +B ur +Ġstory line +G un +ĠSt ick +24 5 +30 7 +ãĤ´ ãĥ³ +ĠAdminist rator +Ġbur nt +Ġp ave +ch oes +Ex ec +Ġcamp uses +Res ult +Ġmut ations +ĠCh arter +Ġcapt ures +Ġcomp ares +Ġbad ge +S cient +Ġer ad +ier y +o i +ett es +ĠE state +Ġst rap +Ġproud ly +Ġf ried +Ġwithd rawn +ĠV oy +ph ony +It ems +ĠP ierce +b ard +Ġann otation +ant on +ill on +Im pro +... 
) +Ġhapp ier +---- -- +ad just +Ġstaff ers +Ġactiv ism +Ġper f +Ġal right +N eed +Ġcomm ence +Ġopio id +ĠAm anda +E s +ĠP ars +ĠK aw +W orks +24 8 +Ġind o +t c +end ant +ĠM oto +Ġlegal ization +OT E +Ġtask ed +Ġt sp +ĠACT IONS +16 6 +Ġrefres hing +ĠN R +ĠPere z +Ġinfring ement +S Y +List en +in ning +k u +Ġrot ate +pro gram +ar ah +Des ign +Ġ( £ +Ġst oring +Ġwar rants +Ġjud gement +ĠB rist +us ually +ph oto +ĠR an +ĠP ine +Ġoutrage ous +ĠValent ine +lu ence +ĠEvery body +Al tern +Ġrele vance +Ġtermin ated +Ġd essert +Ġfulf illed +Ġprosecut ed +ĠW ords +Ġm igrant +Ġcultiv ation +ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ +idel ity +ĠV ern +ĠLog in +Ġmetaph or +ĠT ip +Ġrecru its +ĠP ig +rib ing +Ġenthusi asts +ex per +Ġfright ening +ĠH air +ans on +str ate +Ġh i +He ight +Ġown ing +n one +Ġdis like +Ġkn ives +pher d +Ġloud ly +ĠAP Is +Dis play +ĠL ac +ĠUS S +ab l +ver ages +J ew +Ġ17 2 +ĠHist orical +at oon +ĠPhys ics +in tern +Ġwarm th +Ġto pp +D M +Ġgun man +Ġem peror +od i +ãĥ £ +in atory +ĠR ib +Ġ13 1 +ĠSat urn +ĠSh ining +Ġw aking +Qu otes +Ġcomed ian +en berg + ½ +Ġbelie vers +Ġpaper work +c ustom +Ġle v +Ġl ament +Ġpour ing +22 2 +p olitical +ĠSupp lement +m aid +Ġcruel ty +Ġt read +ys ics +A w +rit es +Ġmod ifier +ĠP osition +Ad am +l b +ub s +Ġimper fect +Ġcl usters +ĠEngine er +ĠC herry +Ġinaug uration +ĠS au +Ġembod iment +ĠUn cle +Ġover r +Ġexplos ions +c ule +ĠPrinc eton +ĠAndre a +Ġincorrect ly +Ġearn est +Ġpil gr +ĠS print +Ġslee ve +Ġhe ars +ĠAm azing +Ġbrow sing +ag in +Ġhom eland +Ġha w +Ġd iving +ist ered +17 8 +Ġbarg aining +ĠArc ade +Ġdeleg ate +ters on +................................ ................................ +ĠJackson ville +27 5 +Ġst agn +Ġad am +ĠSher man +C B +Ġsub urb +ĠFood s +Ġconver ting +ĠAr ist +Ġch ambers +l ove +Ġam ino +ĠG an +Ġmad ness +m c +ĠUS E +def ined +Ġul tr +ind ust +Ġw olves +l ance +Add itionally +Ġcr acks +as ia +ĠRe ason +ĠP ump +Ġaccident al +ĠL aser +ĠR id +Ġinitial ized +ell i +Ġun named +Ġn oun +ĠPass ed +Ġhost age +ĠEth iop +sh irts +Ġun rel +ĠEmb assy +Ġ19 41 +Ġat oms +Ġpur ported +16 4 +ĠF i +Ġgall ons +ĠMon ica +Ġp g +en ment +Ġsort ed +ĠG ospel +Ġhe ights +Ġtr aced +Ġunder going +She ll +Ġs acks +Ġproport ions +Ġhall uc +F ont +ac et +Ġwar mer +ĠIN TER +Ġgrab bing +Pl ug +Ġreal ization +ĠBur ke +Ġen chant +AT ER +ĠSe ed +Ġabund ant +F M +Ġc ivic +V s +is i +Ġv ow +Ġre per +ĠPartners hip +Ġpenet ration +Ġax e +Ġsh attered +ĠZ ombies +Ġv inyl +ĠAl ert +e on +Ġoblig ed +ĠIll ust +ĠPl aza +ĠFront ier +Ġdavid jl +ĠSer ial +ĠH av +ĠNut rition +B i +Ġâĸ Ī +ĠJ ays +lin ux +Ġhur ry +Ġv oy +Ġhop eless +ĠSte alth +Ġ ãģ +ess ors +tt le +b org +ĠSaf ari +f ell +Ġw ary +d ue +ĠAb ove +H a +E LL +Ġnot or +ĠW on +T oo +Ġoccup ations +Ġposs essions +Ġinv iting +Ġpred ators +Ġacceler ated +Ġ15 7 +uter te +ĠC ube +e ast +acc ount +G ive +Ġtrans plant +red ients +id able +Ġscreens hots +ĠG und +ĠF S +Ġtravel ers +Ġsens ory +ĠF iat +ĠRock ets +İ ĭ +_ { +F riend +Ġchar ming +AL S +Ġenjoy ment +m ph +Ġ5 000 +ĠRE G +Ù Ĩ +b ia +Ġcomp ilation +ro st +ĠV P +ĠSch ne +201 9 +Ġcop ying +M ORE +ĠFl ore +f alls +2 15 +t otal +Ġdis ciples +d ouble +Ġexceed ing +Ġsm ashed +Ġconcept ual +ĠRom ania +ĠB rent +ĠI CE +ĠT ou +Ġg rap +Ġn ails +18 9 +ãĥ ĺ +Ġproc ure +e ur +Ġconfir ming +ĠC ec +aw i +ĠEd en +Ġn g +Ġengine ered +at ics +Ġhook ed +Ġdisgust ing +ĠMur der +ãĤ ¿ +L ibrary +Ġ16 8 +Al most +hem atic +Men u +ĠNot re +ĠJ ur +Ġkidn apped +Ġhack er +ĠJ ade +Ġcreep y +Ġdraw ings +ĠSpons or +Ġcycl ists +ĠGob lin +Ġoptim ized +Ġst aged +ĠMc D 
+bet ween +A ge +en o +S ex +ĠW ide +n ings +av is +Ġincap able +ĠK ob +Ġreward ing +ĠL one +oles cent +Ġcontract ed +Ġstick y +J ose +B all +f est +ĠIn put +ĠRec ently +Ġto mat +squ are +App lication +Ġnit rogen +Ġdupl icate +ĠRec on +ĠD ear +L ondon +Ġint ra +Ġd ock +Ġout reach +ĠM illion +Ġmamm als +am pton +V AL +Ġsn aps +Ġd os +ĠWh ole +ĠRead y +T ry +ĠWinn ipeg +ear ance +Ġinc urred +ren ched +ĠNS W +il ot +rain e +Ġc ube +g ot +Ġrun way +etermin ed +ĠHaw ks +Ġsurviv or +ĠW ish +ĠD in +ĠDE F +ĠV ault +18 7 +Ġmush rooms +Ġcris p +be y +ĠDisco very +Ġdevelopment al +Ġparad igm +Ġcha otic +ĠT su +Ġ3 33 +b ons +Ġbacter ial +Ġcomm its +Ġcos mic +Ġme ga +oc ative +ĠP aint +ophob ic +Ġv ain +Ġcar ved +ĠTh ief +ĠG ul +ows hip +Ġc ites +ĠEd inburgh +Ġdimin ished +Ġacknowled ges +ĠK ills +Ġmic row +ĠHer a +Ġsen iors +Ġwhere by +H op +at ron +Ġun available +ĠN ate +Ġ4 80 +Ġsl ated +ĠRe becca +ĠB attery +Ġgram mar +Ġhead set +Ġcurs or +Ġex cluding +any e +aunder ing +eb in +Ġfeas ible +ĠPub lishing +ĠLab s +ĠCl iff +ĠFerr ari +Ġp ac +vis ible +mark ed +pe ll +Ġpol ite +Ġstagger ing +ĠGal actic +Ġsuper st +Ġpar an +ĠOffic ers +ãĢ ģ +Ġspecific s +ul us +23 9 +ĠP aste +AM P +ĠPan ama +ĠDe lete +angu ard +rest rial +Ġhero ic +ĠD y +ا ÙĦ +Ġincumb ent +Ġcr unch +t ro +Ġsc oop +Ġblog ger +Ġsell ers +ure n +Ġmedic ines +ĠC aps +ĠAnim ation +ox y +Ġout ward +Ġinqu iries +22 9 +Ġpsych ologist +ĠS ask +ev il +Ġcontam inated +ãĤ ¨ +he rence +Ġbrand ed +ĠAbd ul +z h +Ġparagraph s +Ġmin s +Ġcor related +er b +Ġimp art +Ġmil estone +ĠSol utions +ot le +Ġunder cover +Ġmar ched +ĠCharg ers +f ax +ĠSec rets +Ġr uth +we ather +Ġfemin ine +Ġsh am +Ġprest igious +igg ins +Ġs ung +hist ory +ett le +gg ie +Ġout dated +ol and +Ġper ceptions +ĠS ession +ĠDod gers +u j +ĠE ND +D oc +Ġdefic iency +Gr and +ĠJ oker +Ġretro spect +Ġdiagn ostic +Ġharm less +Ġro gue +ĠA val +E qu +Ġtrans c +ĠRoberts on +ĠDep ending +ĠBurn s +iv o +Ġhost ility +F eatures +ĵ ĺ +Ġdis comfort +ĠL CD +spec ified +ĠEx pect +3 40 +Ġimper ative +ĠReg ular +Ch inese +Ġstate wide +Ġsy mm +Ġlo ops +Ġaut umn +N ick +Ġsh aping +Ġqu ot +Ġc herry +ĠCross ref +è¦ ļéĨĴ +Stand ard +he ed +ĠD ell +ĠViet namese +Ġo st +ĠV alkyrie +O A +Ass ad +Ġreb ound +ĠTra ffic +pl aces +æ ĺ +ĠB uc +17 2 +Ġshel ters +Ġins isting +ĠCertain ly +ĠKenn eth +ĠT CP +Ġpen al +ĠRe play +he ard +Ġdial ect +iz a +ĠF Y +it cher +ĠD L +Ġspir al +Ġquarterback s +Ġh ull +Ġgo ogle +Ġto dd +ĠSter ling +ĠPl ate +Ġsp ying +mb ol +ĠReal m +ĠPro ced +ĠCr ash +Ġtermin ate +Ġprotest ing +C enter +gu ided +Ġun cover +Ġboy cott +Ġreal izes +s ound +Ġpret ending +ĠV as +19 80 +Ġfram ed +Ġ13 9 +Ġdesc ended +Ġrehab ilitation +Ġborrow ing +ĠB uch +Ġbl ur +R on +ĠFro zen +en za +Ch ief +ĠP oor +Ġtransl ates +M IN +Ġ2 12 +J ECT +Ġerupt ed +Ġsuccess es +S EC +Ġpl ague +Ġg ems +d oms +Ġstret ches +ĠSp y +Ġstory telling +C redit +ĠP ush +Ġtra ction +Ġin effective +ĠL una +Ġt apes +Ġanaly tics +erc ise +Ġprogram mes +ĠCar bon +Ġbeh old +he avy +ĠConserv ation +ĠF IR +Ġs ack +ter min +ric ks +Ġhous ed +Ġunus ually +I ce +Ġexecut ing +ĠMor oc +ed ay +Ġed itions +Ġsm arter +ĠB A +Ġout law +Ġvan ished +ib a +AL SE +ĠSil va +23 8 +C ould +Ġphilos opher +Ġevac uated +Sec ret +14 2 +Ġvis as +ãĤ ¬ +ĠM alt +ĠClear ly +ĠN iger +ĠC airo +ĠF ist +3 80 +ĠX ML +aut o +it ant +Ġrein forced +Rec ord +ĠSurviv or +G Hz +Ġscrew s +parent s +Ġo ceans +ma res +Ġbra kes +vas ive +Ġhell o +ĠS IM +rim p +Ġo re +ĠArm our +24 7 +Ġterr ific +Ġt ones +14 1 +ĠMin utes +Ep isode +Ġcur ves +Ġinflamm atory +Ġbat ting +ĠBeaut iful +L ay +Ġunp 
op +v able +Ġr iots +ĠTact ics +b augh +ĠC ock +Ġorg asm +ĠS as +Ġconstruct or +et z +G ov +Ġant agon +Ġthe at +Ġde eds +ha o +c uts +ĠMc Cl +Ġu m +ĠScient ists +Ġgrass roots +ys sey +"] => +Ġsurf aced +Ġsh ades +Ġneighb ours +Ġad vertis +oy a +Ġmer ged +Up on +Ġg ad +Ġanticip ate +Any way +Ġsl ogan +Ġdis respect +I ran +ĠT B +act ed +Ġsubp oen +medi ately +OO OO +Ġwa iver +Ġvulner abilities +ott esville +ĠHuff ington +J osh +ĠD H +M onday +ĠEll en +K now +x on +it ems +22 8 +Ġf ills +ĠN ike +Ġcum ulative +and als +I r +Ġ ì +Ġfr iction +ig ator +Ġsc ans +ĠVi enna +ld om +Ġperform ers +P rim +Ġb idding +M ur +Ġlean ed +ĠPri x +al ks +Ġ[ âĢ¦] +ĠTw itch +ĠDevelop er +ĠG ir +Ġcall back +Ab stract +Ġacc ustomed +Ġfreed oms +ĠP G +ur acy +Ġl ump +is man +,, ,, +19 92 +ĠR ED +Ġwor m +M atch +ĠPl atinum +I J +ĠOwn er +Tri via +com pl +Ġnew born +Ġfant as +O wn +Ġ19 59 +Ġsymp ath +Ġub iqu +Ġoutput s +Ġal lev +Ġpr ag +K evin +Ġfav ors +Ġbur ial +Ġn urt +so lete +c ache +Ġ15 6 +Ġunl ocks +te chn +M aking +Ġcon quer +ad ic +æ ĸ +Ġel f +Ġelect orate +ĠKurd s +ĠSt ack +ĠSam urai +Ġâ ĺħ +Ġ{ } +ĠS aid +ĠFall out +Ġkind ness +ĠCustom s +ĠBou levard +Ġhelicop ters +ot ics +ĠVe get +com ment +Ġcritic ised +Ġpol ished +ĠRem ix +ĠC ultural +Ġrec ons +Ġdo i +at em +Sc reen +Ġbar red +Com ments +ĠGener ally +Ġsl ap +7 20 +V ari +p ine +Ġem pt +Ġh ats +ĠPlay ing +l ab +a verage +form s +ĠC otton +Ġcan s +ĠD ON +ĠSom alia +C rypt +ĠIncre ases +E ver +mod ern +Ġsur geon +3 000 +Ġrandom ized +================================ ================================ +B ern +im pl +ĠC OR +Ġpro claim +th ouse +Ġto es +Ġam ple +Ġpres erving +Ġdis bel +gr and +B esides +Ġsil k +ĠPat tern +h m +Ġenter prises +Ġaffidav it +ĠAdvis ory +Ġadvert ised +ĠRel igious +se ctions +psy ch +ĠField s +aw ays +Ġhasht ag +ĠNight mare +Ġv ampire +Ġfore nsic +rosso ver +n ar +Ġn avy +Ġvac ant +ĠD uel +Ġhall way +Ġface book +ident ally +ĠN RA +Ġm att +Ġhur ricane +ĠKir by +ĠP uzzle +Ġsk irt +ou st +du llah +Ġanal ogy +in ion +Ġtomat oes +ĠN V +ĠPe ak +ĠMe yer +Ġappoint ments +Ġm asc +Ġal ley +re hend +Ġchar ities +Ġund o +Ġdest inations +ĠTest ing +"> " +c ats +* . 
+Ġgest ures +gener al +Le ague +Ġpack ets +ĠInspect or +ĠBer g +Ġfraud ulent +Ġcritic ize +F un +Ġbl aming +nd ra +Ġsl ash +ĠE ston +Ġpropos ing +Ġwh ales +Ġtherap ist +Ġsub set +Ġle isure +EL D +ĠC VE +ĠAct ivity +Ġcul min +sh op +ĠD AY +is cher +ĠAdmir al +ĠAtt acks +Ġ19 58 +Ġmem oir +Ġfold ed +Ġsex ist +Ġ15 3 +ĠL I +Ġread ings +Ġembarrass ment +ĠEmploy ment +w art +ch in +Ġcontin uation +l ia +Rec ently +Ġd uel +Ġevac uation +ĠKash mir +Ġdis position +ĠR ig +Ġbol ts +Ġins urers +4 67 +M ex +Ġret aliation +Ġmis ery +Ġunre asonable +r aining +I mm +ĠP U +em er +Ġgen ital +ãĤ ³ +ĠC andy +Ġon ions +ĠP att +lin er +Ġconced ed +Ġf a +Ġfor c +ĠH ernandez +ĠGe off +deb ian +ĠTe ams +Ġc ries +Ġhome owners +23 7 +A BC +Ġst itch +Ġstat istic +Ġhead ers +ĠBi ology +Ġmot ors +ĠG EN +ĠL ip +Ġh ates +Ġhe el +S elf +i pl +ED IT +ort ing +Ġann ot +ĠSpe ech +old emort +ĠJ avascript +ĠLe Bron +Ġfoot print +Ġf n +Ġseiz ures +n as +h ide +Ġ19 54 +ĠBe e +ĠDecl aration +ĠKat ie +Ġreserv ations +N R +f emale +Ġsatur ated +Ġb iblical +Ġtroll s +Dev ice +ph otos +Ġdr ums +ãĥīãĥ© ãĤ´ãĥ³ +N ight +f ighter +ĠH ak +ri ber +Ġc ush +Ġdiscipl inary +ba um +ĠG H +ĠSch midt +ilib rium +Ġs ixty +ĠKush ner +ro ts +Ġp und +ĠR ac +Ġspr ings +Ġcon ve +Bus iness +F all +Ġqual ifications +Ġvers es +Ġnarc iss +ĠK oh +ĠW ow +ĠCharl ottesville +ed o +Ġinterrog ation +ĠW ool +36 5 +B rian +Ġâľ ĵ +Ġalleg es +ond s +id ation +ĠJack ie +y u +Ġl akes +Ġworth while +Ġcryst als +ĠJud a +Ġcomp rehend +Ġfl ush +Ġabsor ption +ĠO C +Ġfright ened +ĠCh ocolate +Mart in +Ġbu ys +Ġbu cks +Ġapp ell +ĠChampions hips +Ġlist ener +ĠDef ensive +Ġc z +ud s +ĠM ate +Ġre play +Ġdecor ated +Ġs unk +ĠV IP +ĠAn k +Ġ19 5 +aa aa +Nob ody +ĠMil k +ĠG ur +ĠM k +ĠS ara +Ġse ating +ĠW id +Tr ack +Ġemploy s +Ġgig antic +AP P +ãĤ § +in ventory +Ġtow el +at che +l asting +ĠT L +Ġlat ency +Ġkn e +B er +me aning +Ġup held +Ġplay ground +Ġm ant +S ide +Ġstere o +Ġnorth west +Ġexception ally +Ġr ays +Ġrec urring +D rive +Ġup right +Ġab duct +ĠMar athon +Ġgood bye +Ġal phabet +h p +Ġcourt room +ring ton +ot hing +T ag +Ġdiplom ats +Ġbar bar +ĠAqu a +18 3 +33 33 +Ġmat urity +Ġinst ability +ĠAp ache +Ġ= == +Ġfast ing +ĠGr id +Mod Loader +Ġ15 2 +A bs +ĠOper ating +ett i +Ġacqu aint +Don nell +ĠK em +ĠFor ge +Ġarm ored +M il +Ġphilos ophers +in vest +Pl ayers +â Ī +Ġmy riad +Ġcomr ades +R ot +Ġremember ing +Ġcorrespond s +Ġprogram mers +ĠLyn n +Ġo lig +Ġco herent +yn chron +ĠChem ical +Ġj ugg +p air +post s +E ye +ĠIn ner +Ġsem ester +ott est +ĠEmir ates +ric anes +or ously +m its +ĠW is +Ġd odge +l ocation +Ġf aded +Am azon +ĠPro ceed +ĠIN FO +j ournal +ĠTru ck +T en +Ġ2 17 +Ġstat utes +m obile +ĠT ypes +Rec omm +b uster +pe x +Ġleg ends +Ġhead ache +f aced +ĠWi Fi +if ty +ĠH ER +Ġcirc uits +ER ROR +22 6 +ol in +Ġcyl inder +osp ace +ik ers +P rem +Qu ant +Ġconflic ting +Ġslight est +Ġfor ged +ion age +Step hen +ĠK ub +ĠOpp ortun +ĠHe al +Ġbl o +Ġrul ers +Ġh uh +Ġsubmar ine +f y +ass er +Ġallow ance +ĠKas ich +ĠT as +ĠAustral ians +Forge ModLoader +ĠâĨ ij +ĠMat rix +am ins +Ġ12 00 +ĠAc qu +23 6 +D ocument +ĠBre aking +19 3 +ĠSub st +ĠRoll er +ĠPro perties +ĠN I +t ier +Ġcr ushing +Ġadvoc ating +Further more +keep ers +Ġsex ism +x d +Ġcall er +ĠS ense +chie ve +ĠT F +Ġfuel ed +Ġreminis cent +Ġobs ess +ur st +Ġup hold +ĠF ans +het ics +Ġâ Ĺ +ĠB ath +Ġbe verage +Ġo scill +25 4 +Ġpol es +Ġgrad ual +Ġex ting +ĠS uff +ĠS uddenly +Ġlik ing +Ġ19 49 +un ciation +am ination +ĠO mar +ĠL V +ĠCon sequently +Ġsynt hes +ĠG IF +Ġp ains +Ġinteract ing +u ously +inc re +Ġrum or +ĠScient 
ology +19 7 +ĠZ ig +Ġspe lling +ĠA SS +Ġexting u +ms on +Ġg h +Ġremark ed +ĠStrateg ic +ĠM ON +å ¥ +g ae +ĠWH AT +E ric +ĠCamp us +Ġmeth ane +Ġimag in +J UST +ĠAl m +X T +i q +ĠR SS +Ġwrong doing +att a +Ġbig ot +Ġdemonstr ators +ĠCal vin +ĠV illa +Ġmembr ane +ĠAw esome +Ġbenef ic +26 8 +Ġmagn ificent +ĠL ots +G reg +ĠBor is +Ġdetain ees +ĠH erman +Ġwhis pered +Ġa we +Prof essor +fund ing +Ġphys iological +ĠDest ruction +Ġlim b +Ġmanip ulated +Ġbub bles +Ġpse ud +Ġhyd ra +ĠBrist ol +Ġst ellar +ĠExp ansion +ĠK ell +ĠInterest ingly +Ġm ans +Ġdrag ging +Ġec ological +ĠF it +Ġg ent +Ġbenef ited +ĠHait i +Ġpoly g +ãĥ İ +Ġ20 30 +Ġpro w +Ġrecon struction +Ġwas t +Ġpsych ic +ĠGree ks +Hand ler +16 2 +ĠP ulse +Ġsol icit +Ġsy s +Ġinflu x +ĠG entle +per cent +Ġprolifer ation +Ġtax able +Ġdisreg ard +Ġesc aping +Ġg inger +Ġwith stand +Ġdevast ated +ĠD ew +ser ies +Ġinject ed +ela ide +Ġturn over +he at +Ļ Ĥ +H appy +ĠSil ent +ãĤ Ń +iv ism +Ġir rational +AM A +Ġre ef +r ub +Ġ16 2 +Ġbank ers +ĠEth ics +v v +Ġcritic isms +K n +18 6 +M ovie +ĠT ories +Ġno od +Ġdist ortion +F alse +od ore +Ġt asty +Res earch +ĠU ID +- ) +Ġdivor ced +ĠM U +ĠHay es +ĠIs n +ian i +ĠH Q +Ġ" # +ign ant +Ġtra umatic +ĠL ing +H un +Ġsab ot +on line +r andom +Ġren amed +ra red +K A +d ead +é t +ĠAss istance +Ġse af +++++ ++++ +Ġse ldom +ĠWeb b +Ġbo olean +u let +Ġref rain +ĠDI Y +ru le +Ġshut ting +Ġutil izing +load ing +ĠPar am +co al +oot er +Ġattract ing +ĠD ol +Ġher s +ag netic +ĠRe ach +im o +Ġdisc arded +ĠP ip +01 5 +ü r +Ġm ug +Im agine +C OL +Ġcurs ed +ĠSh ows +ĠCurt is +ĠSach s +spe aking +ĠV ista +ĠFram ework +ong o +Ġsub reddit +Ġcr us +ĠO val +R ow +g rowing +Ġinstall ment +Ġgl ac +ĠAdv ance +EC K +ĠLGBT Q +LE Y +Ġac et +Ġsuccess ive +ĠNic ole +Ġ19 57 +Qu ote +Ġcircumst ance +ack ets +Ġ14 2 +ort ium +Ġguess ed +ĠFr ame +Ġperpet rators +ĠAv iation +ĠBen ch +Ġhand c +A p +Ġ19 56 +25 9 +r and +Net Message +d in +urt les +h ig +ĠV III +ff iti +ĠSw ords +b ial +Ġkidn apping +dev ice +Ġb arn +ĠEl i +auc as +S end +Con structed +Ġ ½ +Ġneed les +Ġad vertisements +Ġv ou +Ġexhib ited +ĠFort ress +As k +B erry +TY PE +Ġcan cers +ump ing +ĠTerrit ory +Ġpr ud +Ġn as +Ġathe ist +Ġbal ances +ãģ Ł +ĠSh awn +& & +Ġland sc +ĠR GB +Ġpet ty +Ġex cellence +Ġtransl ations +Ġpar cel +ĠChe v +E ast +ĠOut put +im i +Ġamb ient +ĠTh reat +Ġvill ains +Ġ5 50 +IC A +Ġtall er +Ġle aking +c up +Ġpol ish +Ġinfect ious +ĠK C +Ġ@ @ +back ground +Ġbureaucr acy +ĠS ai +un less +it ious +ĠSky pe +At l +ID ENT +00 8 +Ġhyp ocr +Ġpit chers +Ġguess ing +ĠF INAL +Bet ween +Ġvill agers +Ġ25 2 +f ashion +ĠTun is +Be h +ĠEx c +ĠM ID +28 8 +ĠHas kell +19 6 +ĠN OR +Ġspec s +Ġinv ari +Ġgl ut +ĠC ars +Ġimp ulse +Ġhon ors +g el +Ġjurisd ictions +ĠBund le +ul as +Calif ornia +ĠIncre ase +Ġp ear +Ġsing les +Ġc ues +Ġunder went +ĠW S +Ġexagger ated +Ġdub ious +Ġfl ashing +L OG +) ]. 
+J ournal +t g +V an +ĠI stanbul +ĠIn sp +ĠFrank en +D raw +Ġsad ness +Ġiron ic +ĠF ry +x c +Ġ16 4 +is ch +W ay +ĠProtest ant +h orn +Ġun aff +ĠV iv +ill as +ĠProduct ions +ĠH ogan +Ġper imeter +ĠS isters +Ġspont aneous +Ġdown side +Ġdescend ants +Ġor n +w orm +Japan ese +Ġ19 55 +Ġ15 1 +ĠDo ing +els en +umb les +Ġrad ically +ĠDr um +ĠB ach +Ġli abilities +ĠO B +ĠElement ary +Ġmem e +yn es +Ġfinger print +ĠGr ab +Ġundert ake +Mem bers +ĠRead er +ĠSim s +g od +Ġhypot hetical +s cient +ĠA J +Ġchar ism +Ġad missions +ĠMiss ile +tr ade +Ġexerc ising +ĠBack ground +W ritten +Ġvoc als +whe ther +Ġv i +ĠW inner +Ġl itter +ĠSh ooting +ST EM +ãĤ ¡ +ĠA FL +Ġvari ability +Ġe ats +ĠD PS +b row +Ġeleph ants +Ġstr at +Ġ Å +Ġsett lers +Matt hew +Ġin advert +H I +ĠIM F +ĠGo al +Ġnerv es +John son +ey e +ablish ment +Th ursday +BIL ITY +H ad +am oto +het amine +ep s +Ġmit ochond +Ġcomp ressed +ĠTre vor +ĠAnim als +T ool +L ock +Ġtwe ak +Ġpin ch +Ġcancell ation +P ot +Ġfoc al +ĠAst ron +17 3 +ĠA SC +ĠO THER +umn i +Ġdem ise +d l +Ù ħ +Sem itism +Ġcr acking +Ġcollabor ative +Ġexpl ores +s ql +Ġher bs +Ġconfig urations +m is +ĠRes ult +ace y +ĠSm oke +Ġsan ct +el ia +Ġdeg ener +Ġdeep est +Ġscream ed +Ġn ap +Soft ware +ĠST AR +E F +ĠX in +spons ored +mans hip +23 3 +Ġprim aries +Ġfilter ing +Ġas semble +m il +ĠMy ers +b ows +Ġpun ched +M ic +Ġinnov ations +Ġfun c +and o +Ġfr acking +ĠV ul +о Ð +osh op +ĠIm mun +Ġsett ling +Ġadolesc ents +Ġreb uilding +Ġtransform ing +Ġpar ole +Ġhar bor +Ġbook ing +ot ional +onge vity +ĠY o +b ug +Ġemer ges +ĠMethod s +ĠCh u +P res +ĠDun geons +Ġtra iling +ĠR um +ĠH ugh +å¤ © +ĠE ra +ĠBatt les +Res ults +ĠTr ading +Ġvers a +c ss +ax ies +he et +Ġgre ed +19 89 +Ġgard ens +Ġconting ent +P ark +ĠLeaf s +h ook +ro be +Ġdiplom acy +ĠF uel +ĠInv asion +Ġupgr ading +M ale +Ġe lic +Ġrelent less +ĠCo venant +ap esh +ĠT rop +T y +pro duction +art y +Ġpun ches +ak o +cyclop edia +ĠR abbit +ĠHD MI +Ġ14 1 +Ġf oil +Item Image +ĠF G +Ġimplement ations +ĠP om +ixt ures +Ġaw ait +Ġ3 30 +am us +Ġumb rella +Ġfore see +se par +Ġcircum cision +Ġperipher al +S ay +ĠExper t +In c +Ġwithd rew +ĠAnd ers +f ried +Ġradio active +ĠOp ening +Ġboard ing +ĠN D +Ġover throw +Act iv +W P +ĠAct s +× Ļ +Ġmot ions +v ic +ĠM ighty +ĠDef ender +a er +Ġthank ful +ĠK illing +ĠBr is +mo il +Ġpredict ing +26 6 +ch oice +Ġkill ers +Ġinc ub +ĠChe st +ather ing +Ġpro claimed +fl ower +oss om +umbled ore +ĠCy cling +ĠOccup y +AG ES +P en +ĠY ug +Ġpack aged +Ġheight ened +c ot +st ack +C ond +Ġst amps +m age +Ġpersu aded +Ġens l +ĠCard inal +Ġsol itary +Ġpossess ing +ĠC ork +Ġev id +ĠT ay +Ġbl ues +Ġextrem ism +Ġlun ar +Ġcl own +Te chn +Ġfest ivals +ĠPv P +ĠL ar +Ġconsequ ently +p resent +Ġsom eday +ç İĭ +ĠMet eor +Ġtour ing +c ulture +Ġbe aches +S hip +c ause +ĠFl ood +ãĥ ¯ +Ġpur ity +th ose +Ġem ission +b olt +Ġch ord +ĠScript ure +L u +Ġ$ { +cre ated +Other s +25 8 +Ġelement al +Ġannoy ed +ĠA E +d an +ĠS ag +Res earchers +Ġfair y +âĢĵ âĢĵ +======== ==== +Sm art +GG GG +Ġskelet ons +Ġpup ils +link ed +Ġur gency +en abled +ĠF uck +Ġcoun cill +r ab +U AL +T I +Ġlif es +Ġconf essed +B ug +Ġharm on +ĠCON FIG +ĠNe utral +D ouble +Ġst aple +ĠSH A +Brit ish +ĠSN P +AT OR +oc o +Ġswing ing +ge x +ole on +pl ain +ĠMiss ing +ĠTro phy +v ari +ran ch +Ġ3 01 +4 40 +00000000 00000000 +Ġrest oring +Ġha ul +uc ing +ner g +Ġfut ures +Ġstrateg ist +quest ion +Ġlater al +ĠB ard +Ġs or +ĠRhod es +ĠD owntown +????? 
- +ĠL it +ĠB ened +Ġco il +st reet +ĠPort al +FI LE +ĠG ru +* , +23 1 +ne um +Ġsuck ed +Ġr apper +Ġtend encies +ĠLaure n +cell aneous +26 7 +Ġbrow se +Ġover c +head er +o ise +Ġbe et +ĠG le +St ay +Ġm um +Ġtyp ed +Ġdiscount s +T alk +ĠO g +ex isting +ĠS ell +u ph +C I +ĠAust rian +ĠW arm +Ġdismiss al +Ġaver ages +c amera +Ġalleg iance +L AN +=" # +Ġcomment ators +ĠSet ting +ĠMid west +Ġpharm ac +ĠEX P +Ġstain less +Ch icago +Ġt an +24 4 +Ġcountry side +ĠV ac +29 5 +Ġpin ned +Ġcr ises +Ġstandard ized +T ask +ĠJ ail +ĠD ocker +col ored +f orth +" }, +Ġpat rons +Ġsp ice +Ġm ourn +ĠM ood +Ġlaund ry +Ġequ ip +ĠM ole +y ll +ĠTH C +n ation +ĠSher lock +Ġiss u +ĠK re +ĠAmeric as +ĠA AA +Ġsystem atically +Ġcont ra +ĠS ally +Ġrational e +Ġcar riage +Ġpe aks +Ġcontrad iction +ens ation +ĠFail ure +Ġpro ps +Ġnames pace +Ġc ove +field s +ãĤ ĭ +Ġw ool +ĠC atch +Ġpresum ed +ĠD iana +r agon +ig i +Ġh amm +Ġst unt +ĠG UI +ĠObserv atory +ĠSh ore +Ġsmell s +ann ah +Ġcock pit +ĠD uterte +8 50 +Ġopp ressed +bre aker +ĠCont ribut +ĠPer u +ĠMons anto +ĠAtt empt +Ġcommand ing +Ġfr idge +ĠR in +ĠChe ss +ual ity +Ġo l +Republic an +ĠGl ory +ĠW IN +.... ... +ag ent +read ing +Ġin h +J ones +Ġcl icks +al an +Ġ[ ]; +ĠMaj esty +ĠC ed +op us +ate l +à ª +AR C +ĠEc uador +ãĥ ł +ĠK uro +Ġritual s +Ġcapt ive +Ġoun ce +Ġdisag reement +Ġsl og +f uel +P et +M ail +Ġexerc ised +Ġsol ic +Ġrain fall +Ġdev otion +ĠAss essment +Ġrob otic +opt ions +ĠR P +ĠFam ilies +ĠFl ames +Ġassign ments +00 7 +aked own +Ġvoc abulary +Re illy +Ġc aval +g ars +Ġsupp ressed +ĠS ET +ĠJohn s +Ġwar p +bro ken +Ġstat ues +Ġadvoc ated +Ġ2 75 +Ġper il +om orph +ĠF emin +per fect +Ġh atch +L ib +5 12 +Ġlif elong +3 13 +Ġche eks +Ġnum bered +ĠM ug +B ody +ra vel +We ight +ĠJ ak +ĠHe ath +Ġkiss ing +ĠJ UST +Ġw aving +u pload +Ġins ider +ĠPro gressive +ĠFil ter +tt a +ĠBe am +Ġviol ently +ip ation +Ġskept icism +Ġ19 18 +ĠAnn ie +ĠS I +Ġgen etics +Ġon board +at l +ĠFried man +ĠB ri +cept ive +Ġpir ate +ĠRep orter +27 8 +Ġmyth ology +Ġe clipse +Ġsk ins +Ġgly ph +ing ham +F iles +C our +w omen +Ġreg imes +Ġphotograp hed +K at +ĠMA X +Offic ials +Ġunexpected ly +Ġimpress ions +F ront +;;;; ;;;; +Ġsuprem acy +Ġs ang +Ġaggrav ated +Ġabrupt ly +ĠS ector +Ġexc uses +Ġcost ing +ide press +St ack +ĠR NA +ob il +Ġghost s +ld on +at ibility +Top ics +Ġreim burse +ĠH M +ĠDe g +Ġth ief +y et +ogen esis +le aning +ĠK ol +ĠB asketball +Ġf i +ĠSee ing +Ġrecy cling +Ġ[ - +Cong ress +Ġlect ures +P sy +Ġne p +Ġm aid +Ġori ented +A X +Ġrespect ful +re ne +fl ush +ĠUn loaded +re quest +gr id +ĠAltern atively +ĠHug o +Ġdec ree +ĠBuddh ism +and um +And roid +ĠCong o +ĠJoy ce +Ġacknowled ging +hes ive +ĠTom orrow +ĠH iro +th ren +ĠM aced +Ġho ax +ĠIncre ased +ĠPr adesh +W ild +____ __ +16 1 +Ġa unt +Ġdistribut ing +ĠT ucker +ĠSS L +ĠW olves +B uilding +ou lt +ĠLu o +ĠY as +ĠSp ir +ĠSh ape +ĠCamb od +ĠIP v +Ġm l +Ġext rad +39 0 +ĠPenn y +d ream +Ġstation ed +opt ional +ew orthy +. 
+ĠWorks hop +ĠRet ail +ĠAv atar +6 25 +N a +ĠV C +ĠSec ure +M Y +19 88 +oss ip +Ġpro state +Ġund en +Ġg amer +ĠCont ents +ĠWar hammer +ĠSent inel +3 10 +Ġse gregation +ĠF lex +ĠM AY +Ġdr ills +ĠDrug s +Islam ic +Ġsp ur +Ġca fe +Ġimag inary +Ġgu iding +Ġsw ings +ĠThe me +ob y +Ġn ud +Ġbe gging +Ġstr ongh +Ġreject ing +Ġpedest rians +ĠPro spect +R are +s le +Ġconcess ions +ĠConst itutional +Ġbe ams +Ġfib ers +p oon +Ġinstinct s +pro perty +ĠB IG +Sand ers +im ates +Ġco ating +Ġcorps es +ĠTR UE +check ed +Ġ16 6 +A sh +ĠJ S +ĠF iction +Ġcommun al +Ġener getic +oooo oooo +Ġnow adays +IL D +ib o +ĠSU V +R en +Ġdwell ing +Sil ver +Ġt ally +ĠM oving +Ġcow ard +Ġgener als +Ġhorn s +Ġcirc ulated +Ġrob bed +ĠUn limited +Ġharass ed +Ġinhib it +Ġcomp oser +ĠSpot ify +Ġspread s +3 64 +Ġsu icidal +Ġno ises +ĠSt ur +Ġs aga +ĠK ag +is o +Ġtheoret ically +M oney +Ġsimilar ity +Ġslic ed +ut ils +ing es +" - +Ġan th +Ġimp ed +Mod ule +Through out +Ġmen us +comm ittee +and i +ob j +in av +f ired +ĠAb dullah +Ġund ead +Ġfont s +H old +EN G +Ġsustain ability +Ġfl ick +Ġr azor +ĠF est +ĠChar acters +Ġword ing +Ġpopul ist +Ġcritic izing +Ġm use +v ine +Ġcard board +Ġkind ly +Ġfr inge +ĠThe ft +icult ural +Ġgovern ors +Ġ ���� +Ġ16 3 +Ġtime out +ĠA uth +Child ren +A U +Ġred emption +ĠAl ger +Ġ19 14 +Ġw aved +Ġastron auts +og rams +Ġsw amp +ĠFinn ish +Ġcand le +Ġton nes +ut m +Ġr ay +Ġsp un +Ġfear ful +art icles +Ġca us +or ically +ĠRequ ires +ĠG ol +Ġpop e +Ġinaug ural +Ġg le +AD A +ĠIS IL +ĠOff ensive +Ġwatch dog +Ġbal con +ent ity +ĠH oo +Ġgall on +AC C +Ġdoub ling +Ġimpl ication +ĠS ight +Ġdoct r +---- --- +Ġ\ \ +Ġm alt +R oll +Ġâī ¥ +Ġrec ap +add ing +u ces +ĠB end +fig ure +Ġtur key +Ġsoc ietal +ĠT ickets +Ġcommer cially +Ġsp icy +Ġ2 16 +ĠR amp +Ġsuperior ity +à ¯ +ĠTr acker +C arl +ĠC oy +ĠPatri ot +Ġconsult ed +Ġlist ings +Ġsle w +reens hot +ĠG one +Ġ[ ...] +30 9 +Ġh ottest +Ø ± +Ġrock y +ĠD iaz +Ġmass age +Ġpar aly +Ġp ony +A z +Ġcart ridge +ĠN Z +Ġsn ack +ĠLam ar +ple ment +ĠLes lie +Ġm ater +Ġsn ipp +24 6 +Ġjoint ly +ĠBris bane +ĠiP od +Ġpump ing +Ġgo at +ĠSh aron +eal ing +Ġcor on +Ġan omal +rah im +ĠConnect ion +Ġsculpt ure +Ġsched uling +ĠD addy +at hing +Ġeyeb rows +Ġcur ved +Ġsent iments +Ġdraft ing +D rop +( [ +Ġnom inal +ĠLeaders hip +ĠG row +Ġ17 6 +Ġconstruct ive +iv ation +Ġcorrupt ed +ger ald +ĠC ros +ĠChe ster +ĠL ap +ãģ ª +OT H +D ATA +Ġal mond +pro bably +I mp +Ġfe ast +ĠWar craft +F lor +Ġcheck point +Ġtrans cription +Ġ20 4 +Ġtwe aks +Ġrel ieve +S cience +Ġperform er +Z one +Ġtur moil +ig ated +hib it +ĠC afe +the med +Ġflu or +ben ch +Ġde com +ĠU nt +ĠBar rett +ĠF acts +Ġt asting +ĠPTS D +ĠSe al +ĠJuda ism +ĠDynam ic +ĠC ors +V e +ĠM ing +ĠTrans form +v on +ĠDef enders +ĠTact ical +ĠV on +ĠUn ivers +Ġdist orted +ĠB reath +?' 
" +Ġag on +ĠDead ly +Ġl an +ĠCy cle +orn ed +Ġrel iably +Ġgl or +ĠMon key +ãĥ ¡ +Ġad ren +Ġmicrow ave +ĠAl ban +irc raft +dig it +sm art +ĠD read +¯¯¯¯¯¯¯¯ ¯¯¯¯¯¯¯¯ +{ { +ĠRoc hester +Ġsimpl ified +Ġinf licted +Ġtake over +Ġyour selves +ad itional +Ġmus cular +K S +Ġing en +T ax +ĠFe ature +27 7 +Ġcru c +Ġcr ate +Ġun identified +Ġacclaim ed +ĠM anga +ĠFr ances +ĠNep al +ĠG erald +ĠKu wait +Ġsl ain +ĠHe b +ĠG oku +ãģ® æ +28 6 +M rs +ĠC ody +ĠSan ctuary +01 6 +Ġdism ant +Ġdatas et +ĠH ond +b uck +ĠPat terson +Ġpal ette +ĠG D +ic ol +ĠL odge +Ġplanet ary +ak in +ĠRegist ered +ab we +ĠPeters burg +Ġha iled +ĠP iece +S che +ĠDO J +Ġen umer +18 1 +ĠObs erver +ĠB old +f ounded +com merce +Ġexplo its +ĠF inding +UR N +ĠS ne +ĠAc id +ay ette +ĠVal ues +Ġdr astic +Ġarchitect ural +Ġ" . +× ķ +ump ed +Ġwra pping +Ġwid ow +ĠSl ayer +l ace +on ce +German y +av oid +Ġtem ples +P AR +à ´ +ĠLuc ifer +ĠFl ickr +l ov +for ces +Ġsc outing +Ġlou der +tes y +Ġbefore hand +Ä ĵ +ĠNe on +ĠW ol +ĠTyp ically +ĠPolit ico +-+ -+ +Ġbuild er +Ġder ive +K ill +Ġp oker +Ġambig uous +Ġlif ts +Ġcy t +Ġrib s +ood le +ĠS ounds +h air +ĠSynd rome +t f +Ġproport ional +u id +Ġper taining +ĠKind le +ĠNeg ro +Ġreiter ated +ĠTon ight +oth s +ĠCorn ell +Ġo wing +Ġ20 8 +elf are +oc ating +ĠB irds +Sub scribe +Ġess ays +Ġburd ens +Ġillust rations +ar ious +ER AL +ĠCal cul +Ġx en +ĠLink edIn +ĠJ ung +Ġredes ign +Con nor +29 6 +Ġrevers al +ĠAd elaide +ĠL L +Ġs inking +Ġg um +US H +c apt +ĠGr imm +Ġfoot steps +ĠCB D +isp ers +Ġpro se +Wed nesday +ĠM ovies +ed in +Ġoverturn ed +Ġcontent ious +US B +~~~~~~~~ ~~~~~~~~ +ĠCo pper +Ġpoint less +N V +val ues +olph in +d ain +Ġdepos ited +ĠG W +Ġpreced ed +ĠCl a +ĠGo lem +ĠN im +ĠÎ ² +ĠEngine ers +m iddle +Ġfl att +oper ative +Ġcouncil s +imb abwe +el in +Ġstress ful +ĠL D +Ġres h +l ake +Ġwheel chair +ĠAltern ative +Ġoptim ize +oper ation +Ġpe ek +Ġones elf +ig il +Ġtrans itions +op athy +bl ank +Ġ16 9 +17 1 +________________________________ ________________________________ +Ġl aundering +En c +ĠD EC +Ġwork outs +Ġsp ikes +Ġdin osaurs +Ġdiscrim inatory +P ool +R ather +38 5 +R NA +tes ters +et o +ĠIdent ity +Ġve in +ĠBur ton +Ġarc ade +4 20 +Ult imately +ĠSad ly +à ° +p ill +Ġcub ic +ĠSpect rum +the se +st ates +Ġun official +h awks +ĠEVER Y +Ġrain bow +Ġincarcer ation +and ing +Ġsy ll +ĠEver ton +Ġ17 9 +ĠSer bia +Ġ18 9 +m eter +ĠMic key +Ġant iqu +Ġfact ual +ne ck +ĠN are +n orm +m ust +Ġhigh ways +Ġgl am +Ġdivid ing +ĠSquad ron +ĠMar tha +Ġbirth s +C over +//////// //////// +ĠW ong +Ph ot +ĠA LS +ri o +ĠNon etheless +ĠL emon +Ġ20 6 +ĠE E +Ġderiv ative +ĠWW II +v ote +Ġthere in +Ġsepar ating +44 6 +sy nc +ĠStre ets +Ġr att +Ġmunicip ality +ĠShort ly +Ġmon k +) ," +Ġscr ub +Ġoper atives +Ne ither +Pl ace +ĠLim it +F emale +ĠAct or +Char acter +Ġconstit uted +35 7 +Ġprotest ed +ĠSt raw +ĠHe ight +ild a +ĠTy ph +Ġflood s +Ġcos metic +W AY +pert ure +up on +t ons +ess ing +ĠP ocket +Ġro oft +ĠC aucas +Ġant idepress +Ġincomp atible +EC D +Ġoper a +ĠCont est +Ġgener ators +l ime +Def ense +19 87 +for um +Ġsav age +ĠHung arian +n z +Ġmet allic +Ġex pelled +Ġres idency +Ġdress es +66 6 +ĠC lement +f ires +C ategory +Ġge ek +al is +Ġc emetery +educ ated +Ġc rawl +ĠUn able +ĠT yson +ak is +Ġp ardon +ĠW ra +Ġstrengthen ed +ĠF ors +33 5 +ĠH C +ĠM ond +Ġvisual s +ĠBeat les +ett lement +Ġ ï +g ro +Ġb ash +Ġpo orest +Ġex cel +Ġaspir ations +ĠM unicip +ens ible +Ġceremon ies +Ġintimid ation +ĠCON TR +be ck +ĠK ap +as u +Ġtradem arks +ĠS ew +ĠComp etition +net work +ĠAr ri +ĠT et +Ro aming +W C +D at +Ġso b 
+Ġpair ing +Ġoverd ose +SA Y +ab er +Ġrev olt +ĠF ah +act ing +e q +est ation +F ight +ĠMar ks +27 3 +Ġ17 8 +R aw +ãģ ĭ +34 9 +bl ocks +Ġver ge +est ine +ĠPod esta +Ġinv asive +Ġprofound ly +ĠA o +e ach +Ġl est +inter pret +Ġshr inking +Ġerr one +Ġche es +ly s +ĠI vy +ĠDirect ory +Ġhint ed +V ICE +Ġcontact ing +ĠG ent +he i +Ġlabel ing +Ġmerc ury +ĠL ite +Ġexp ires +Ġdest abil +rit is +c u +Ġfeather s +Ġste er +Ġprogram med +ĠV ader +Go ing +ĠE lim +Ġy o +ĠMic he +Ġ20 3 +Ġslee ves +Ġb ully +ĠHum ans +36 8 +Ġcomp ress +ĠBan ner +AR S +Ġa while +Ġcal ib +Ġspons orship +ĠDiff iculty +ĠP apers +Ġident ifier +} . +Ġy og +ĠSh ia +Ġclean up +Ġvib e +int rodu +im ming +Austral ia +Ġout lines +ĠY outube +tr ain +ĠM akes +Ġde ported +Ġcent r +ĠD ug +ĠB oulder +ĠBuff y +Ġinj unction +ĠHar ley +ĠG roups +ĠD umbledore +ĠCl ara +Ġ" - +Ġsacrific ed +ep h +Sh adow +ib ling +Ġfreel ance +Ġevident ly +ph al +Ġret ains +M ir +Ġfin ite +d ar +ĠC ous +Ġrep aired +Ġperiod ic +Ġchampions hips +Ġaster oid +bl ind +Ġexpress ly +ĠAst ros +Ġsc aled +Ġge ographical +ĠRap ids +En joy +Ġel astic +ĠMoh amed +Mark et +be gin +Ġdisco vers +Ġtele communications +Ġscan ner +Ġen large +Ġsh arks +Ġpsy chedel +ĠRou ge +Ġsnap shot +is ine +X P +Ġpestic ides +ĠL SD +ĠDist ribution +re ally +Ġde gradation +Ġdisgu ise +Ġbi om +ĠEX T +Ġequ ations +Ġhaz ards +ĠComp ared +) * +Ġvirt ues +Ġeld ers +Ġenh ancing +ĠAc ross +er os +ang ling +Ġcomb ust +ucc i +Ġconc ussion +Ġcontrace ption +ĠK ang +Ġexpress es +Ġa ux +ĠP ione +Ġexhib its +Deb ug +OT AL +ĠAl ready +ĠWheel er +Ġexp ands +? : +Ġreconc iliation +Ġpir ates +Ġpur se +Ġdiscour age +Ġspect acle +R ank +Ġwra ps +ĠTh ought +Ġimp ending +O pp +ĠAng lo +ĠE UR +Ġscrew ed +ret ched +Ġencour agement +mod els +Ġconf use +mm m +ĠVit amin +âĸij âĸij +C ru +Ġkn ights +Ġdisc ard +Ġb ishops +ĠW ear +ĠGar rett +k an +ãĥ Ł +Ġmascul ine +cap ital +ĠA us +Ġfat ally +th anks +ĠA U +ĠG ut +12 00 +Ġ 00000000 +Ġsur rog +ĠBI OS +ra its +ĠWat ts +Ġresur rection +ĠElect oral +ĠT ips +4 000 +Ġnut rient +Ġdepict ing +Ġspr ink +Ġm uff +ĠL IM +ĠS ample +ps c +ib i +gener ated +Ġspec imens +Ġdiss atisf +Ġtail ored +Ġhold ings +ĠMonth ly +ĠE at +po ons +Ġne c +ĠC age +ĠLot us +ĠLan tern +Ġfront ier +Ġp ensions +Ġj oked +ĠHard y +=-=- =-=- +r ade +U ID +Ġr ails +Ġem it +Ġsl ate +Ġsm ug +Ġsp it +ĠCall s +ĠJac obs +f eat +ĠU E +Ġrest ruct +Ġregener ation +Ġenerg ies +ĠCon nor +OH N +ĠChe ese +Ġg er +Ġresur rect +man agement +N W +Ġpres ently +ĠBru ins +M ember +ĠM ang +id an +Ġboost ing +w yn ++ . +requ isite +ĠNY PD +ĠMe gan +ĠCond itions +Ġp ics +nes ium +ĠR ash +Ġ17 4 +ĠD ucks +Ġemb ro +z u +on ian +rel igious +Ġc raz +ĠAC A +ĠZ ucker +EM A +ĠPro s +We apon +ĠKn ox +ĠAr duino +Ġst ove +Ġheaven s +ĠP urchase +Ġher d +Ġfundra iser +Dig ital +5 000 +Ġprop onents +/ âĢĭ +Ġj elly +ĠVis a +Ġmon ks +Ġadvance ment +ĠW er +Ġ18 7 +e us +ert ility +Ġfet al +Ġ19 36 +L o +Ġout fits +Ġstair case +b omb +Ġcustom ized +cl air +T ree +Ġm apped +ĠConsider ing +ĠTor res +Ġmeth yl +Ġapprox imate +Ġdo om +ĠHans en +Ġc rossover +Ġstand alone +ä ¼ +Ġinv ites +Ġgra veyard +Ġh p +Donald Trump +Ġesc ort +G ar +Ġpredec essors +Ġh ay +Ġen zyme +ĠStra ight +vis ors +I ng +ane ously +ĠApp lied +Ġf ec +ĠDur ant +Ġout spoken +or b +Ġz eal +Ġdisgr ace +' ). 
+ĠChe ng +28 9 +ĠRen a +ĠSu icide +29 4 +Ġout raged +ĠNew man +ĠN vidia +ĠA ber +ĠB ers +Ġrecre ation +Wind ow +ĠD P +x e +Ġped oph +Ġfall out +ambo o +Ġpresent ations +ĠApp s +Ġh tml +3 45 +ĠX XX +Ġrub bing +ĠLe ather +Ġhum idity +se ys +est ablished +ĠUn its +64 6 +Ġrespect able +A uto +Ġthri ving +ĠInn ovation +ang s +Ext ra +reg ulation +29 8 +p ick +Ex amples +ĠC J +Att ack +Ġdr acon +L T +Ġstick er +re rs +Ġsun ny +I ss +reg ulated +d im +ĠAb stract +Ġhus bands +Off ice +om ination +it ars +AN GE +asc al +ĠK ris +ĠInf antry +Ġm alf +ĠA the +ĠR ally +bal anced +................ ........ +OU P +Ġmole cule +met ics +ĠSpl it +ĠInstruct ions +ĠN ights +c ards +Ġt ug +Ġcon e +å Ń +Ġt x +ĠDisc ussion +Ġcatast rophe +pp e +g io +Ġcommun ism +Ġhal ted +ĠGu ant +cle an +ĠSc hed +ĠK anye +Ġw ander +ĠSer iously +Ġ18 8 +enn ial +f ollow +product ive +ĠFl ow +ĠS ail +Ġc raw +Ġsim ulations +or u +ang les +ĠN olan +Ġmen stru +4 70 +Ġ20 7 +aj a +Ġcas ually +board ing +Ġ2 22 +ov y +ĠN umbers +um at +O E +28 7 +ĠCle mson +Ġcert s +Ġsl id +ĠT ribe +Ġto ast +Ġfort unes +Ġf als +ĠComm ittees +Ġg p +Ġf iery +ĠN ets +ĠAn ime +Pack age +ĠComp are +l aughter +in fect +Ġatroc ities +Ġjust ices +Ġins ults +ĠVern on +Ġsh aken +Ġperson a +est amp +36 7 +br ain +Ġexperiment ing +K en +ĠElect ronics +Ġ16 1 +dom ain +Ġgraph ical +b ishop +Ġwho pping +ĠEv angel +Ġadvertis ers +ĠSpe ar +Ġb ids +Ġdestro ys +ut z +Ġunders c +ĠAD D +Ġan ts +ĠC um +ipp les +ĠF ill +Ġgl anced +Ġind icted +ĠE ff +Ġmis con +ĠDes ktop +Ġab ide +ãĥ Ģ +ĠI o +ĠC oul +Ġcaps ule +ĠCh rys +M ON +Ġund es +ĠI RA +Ġc itation +Ġdict ate +ĠNet works +ĠConf lict +ĠSt uff +x a +is ec +ĠChem istry +Ġquarter ly +William s +an an +O pt +ĠAlexand ria +out heastern +ĠSpring field +ĠBlack s +Ġge ography +24 2 +Ġut most +ĠEx xon +ab outs +E VA +ĠEn able +ĠBar r +Ġdisag reed +ĠCy prus +Ġdement ia +Ġlab s +Ġubiqu itous +ĠLO VE +Ġconsolid ated +s r +Ġcream y +ĠTim ber +Reg ardless +ĠCert ificate +Ġ" ... +ogen ous +Capt ain +Ġinsult ing +ĠSor os +ĠInst r +ĠBulgar ia +bet ter +Ġsuck ing +ĠDavid son +at z +Ġcoll ateral +g if +Ġplag ued +ĠC ancel +ĠGard ner +R B +Ġsix teen +Rem ove +ur istic +c ook +R od +Ġcompr ising +f le +) âĢĶ +ĠVik ing +g rowth +agon al +Ġsr f +af ety +m ot +N early +st own +ĠF actor +Ġautom obile +Ġproced ural +m ask +amp ires +Ġdisapp ears +j ab +3 15 +Ġ19 51 +ne eded +Ġd aring +le ader +Ġp odium +Ġun healthy +Ġm und +Ġpy ramid +oc re +Ġkiss ed +Ġdream ed +ĠFant astic +ĠG ly +å Ĭ +Ġgreat ness +Ġsp ices +Ġmet ropolitan +Ġcomp uls +i ets +101 6 +ĠSh am +ĠP yr +fl ies +ĠMid night +Ġswall owed +Ġgen res +ĠL ucky +ĠRew ards +Ġdisp atch +ĠI PA +ĠApp ly +Ġa ven +al ities +3 12 +th ings +Ġ( ). 
+Ġm ates +ĠS z +ĠC OP +ol ate +O FF +Ġre charge +c aps +ĠYork er +ic one +Ġgal axies +ile aks +D ave +ĠP uzz +ĠCelt ic +ĠA FC +27 6 +ĠS ons +Ġaffirm ative +H or +Ġtutorial s +ĠC ITY +ĠR osa +ĠExt ension +Ser ies +Ġf ats +Ġr ab +l is +Ġun ic +Ġe ve +ĠSp in +Ġadul thood +ty p +Ġsect arian +Ġcheck out +ĠCy cl +S ingle +Ġmart yr +Ġch illing +88 8 +ou fl +Ġ] ; +Ġcongest ion +m k +ĠWhere as +Ġ19 38 +ur rencies +er ion +Ġbo ast +ĠPat ients +Ġch ap +ĠB D +real DonaldTrump +Ġexam ines +h ov +Ġstart ling +ĠBab ylon +w id +om ew +br ance +ĠOd yssey +w ig +Ġtor ch +ĠV ox +ĠMo z +ĠT roll +ĠAn s +Similar ly +ĠF ul +00 6 +Un less +ĠAl one +st ead +ĠPub lisher +r ights +t u +ĠDoes n +Ġprofession ally +Ġcl o +ic z +Ġste als +Ġ á +19 86 +Ġst urdy +ĠJoh ann +Ġmed als +Ġfil ings +ĠFr aser +d one +Ġmult inational +Ġf eder +Ġworth less +Ġp est +Yes terday +ank ind +Ġg ays +Ġb orne +ĠP OS +Pict ure +Ġpercent ages +25 1 +r ame +Ġpot ions +AM D +ĠLeban ese +Ġr ang +ĠL SU +ong s +Ġpen insula +ĠCl ause +AL K +oh a +ĠMac Book +Ġunanim ous +Ġl enders +Ġhang s +Ġfranch ises +ore rs +ĠUp dates +Ġisol ate +and ro +S oon +Ġdisrupt ive +ĠSur ve +Ġst itches +ĠSc orp +ĠDomin ion +Ġsupp lying +Ar g +Ġtur ret +ĠL uk +Ġbr ackets +* ) +ĠRevolution ary +ĠHon est +Ġnot icing +ĠSh annon +Ġafford ed +Ġth a +ĠJan et +! -- +ĠNare ndra +ĠPl ot +H ol +se ver +e enth +Ġobst ruction +Ġ10 24 +st aff +j as +or get +sc enes +l aughs +ĠF argo +cr ime +Ġorche str +Ġde let +ili ary +rie ved +Ġmilit ar +ĠGreen e +âĹ ı +ãģ ¦ +ĠGu ards +Ġunle ashed +ĠWe ber +Ġadjust able +Ġcal iber +Ġmotiv ations +Ġà ł +m Ah +ĠL anka +hand le +Ġp ent +ĠR av +ĠAng ular +ĠK au +umb ing +Ġphil anthrop +Ġde hyd +Ġtox icity +e er +ĠY ORK +w itz +å ¼ +ĠI E +commun ity +ĠA H +Ġret ali +Ġmass ively +ĠDani els +ĠD EL +Ġcar cin +Ur l +Ġrout ing +ĠNPC s +ĠR AF +ry ce +Ġwa ived +ĠGu atem +Every body +Ġco venant +Ġ17 3 +Ġrelax ing +Ġqu art +al most +Ġguard ed +ĠSold iers +ĠPL AY +Ġout going +L AND +Ġre write +ĠM OV +ĠIm per +ĠS olution +Ġphenomen al +Ġl ongevity +Ġimp at +ĠN issan +ir ie +Ġod or +ĠZ ar +ok s +Ġmilit ias +ĠSP EC +Ġtoler ated +ars er +ĠBrad ford ++ , +Ġsur real +s f +Can adian +Ġresemb lance +Ġcarbohyd rate +VI EW +Ġaccess ory +me al +larg est +ieg el +Some one +Ġtoug hest +os o +Ġfun nel +Ġcondemn ation +lu ent +Ġw ired +ĠSun set +Jes us +ĠP ST +ĠP ages +ĠTy coon +ĠP F +Ġselect ions +Ġ ठ+part isan +Ġhigh s +ĠR une +Ġcraft s +le ad +ĠParent s +Ġre claim +ek er +ĠAll ied +ae per +Ġlo oming +Ġbenefic iaries +ĠH ull +Stud ents +Jew ish +d j +Ġp act +tem plate +ĠOffic ials +ĠBay lor +Ġhe mp +Ġyouth s +ĠLevel s +ĠX iao +ĠC hes +Ġende avor +ĠRem oved +Ġhipp ocamp +H ell +ãĤ Ĭ +80 5 +Ġd inosaur +ĠWr ath +ĠIndones ian +Ġcalcul ator +ĠD ictionary +Ġ4 20 +ĠM AG +( _ +! 
, +t arians +Ġrestrict ing +rac use +Ġweek day +OU NT +Ġsh rugged +leg round +Ġb ald +ĠDo ctors +Ġt outed +ĠMax well +Ġ2 14 +Ġdiplom at +Ġrep ression +Ġconstitu ency +v ice +r anked +ĠNap oleon +g ang +ĠFore ver +t un +Ġbul b +ĠPD T +ĠC isco +V EN +Ġres umed +Ste ven +ĠManit oba +Ġfab ulous +ĠAg ents +19 84 +Ġam using +ĠMyster ies +Ġor thodox +fl oor +Ġquestion naire +Ġpenet rate +Ġfilm makers +ĠUn c +Ġst amped +Ġth irteen +Ġout field +Ġforward ed +Ġapp ra +Ġa ided +t ry +Ġunf ocused +ĠL iz +ĠWend y +ĠSc ene +Ch arg +Ġreject s +Ġleft ist +ĠProv idence +ĠBr id +reg n +Ġprophe cy +ĠL IVE +4 99 +Ġfor ge +ĠF ML +Ġintrins ic +ĠF rog +Ġw ont +ĠH olt +Ġfam ed +CL US +aeper nick +ĠH ate +ĠC ay +Ġregister ing +ort ality +rop y +ocaly ptic +a an +n av +Ġfasc ist +IF IED +Ġimpl icated +ĠRes ort +ĠChand ler +ĠBr ick +P in +ys c +Us age +ĠHel m +us ra +âĺħ âĺħ +ĠAb bas +Ġunanim ously +Ġke eper +Ġadd icted +?? ? +Ġhelm ets +Ġant ioxid +aps ed +80 8 +gi ene +Ġwa its +Ġmin ion +ra ved +ĠP orsche +Ġdream ing +Ġ17 1 +ĠC ain +Ġun for +ass o +ĠConfig uration +k un +hard t +Ġn ested +ĠL DS +L ES +Ġt ying +en os +Ġc ue +ĠMar qu +sk irts +Ġclick ed +Ġexp iration +ĠAccording ly +ĠW C +Ġbless ings +Ġaddict ive +ĠN arr +y x +ĠJagu ars +Ġrent s +ĠS iber +Ġt ipped +ous se +ĠFitz gerald +Ġhier arch +out ine +Ġwa velength +> . +ch id +ĠProcess ing +/ + +r anking +E asy +ĠConst ruct +Ġt et +ins ured +H UD +Ġqu oting +Ġcommun icated +in x +Ġin mate +Ġerect ed +ĠAbs olutely +ĠSure ly +Ġun im +ĠThr one +he id +Ġcl aws +Ġsuper star +ĠL enn +ĠWh is +U k +ab ol +Ġsk et +ĠN iet +Ġper ks +Ġaff inity +Ġopen ings +phas is +Ġdiscrim inate +T ip +v c +Ġgr inding +ĠJenn y +Ġast hma +hol es +ĠHom er +Ġreg isters +ĠGl ad +Ġcre ations +Ġlith ium +Ġappl ause +unt il +Just ice +ĠTur ks +Ġsc andals +Ġb ake +t ank +M ech +ĠMe ans +ĠM aid +Republic ans +is al +wind ows +ĠSant os +Ġveget ation +33 8 +t ri +Ġfl ux +ins ert +Ġclar ified +Ġmort g +ĠCh im +ĠT ort +Ġdiscl aim +met al +ĠAs ide +Ġindu ction +Ġinf l +Ġathe ists +amp h +Ġe ther +ĠV ital +ĠBu ilt +M ind +Ġweapon ry +S ET +Ġ18 6 +ad min +g am +cont ract +af a +Ġderiv atives +Ġsn acks +Ġch urn +E conom +Ġca pped +ĠUnder standing +ĠH ers +ĠI z +Ġd uct +I ENT +augh ty +Ġâľ Ķ +ĠN P +Ġsa iling +In itialized +Ġt ed +Ġreact ors +ĠL omb +Ġcho ke +ĠW orm +Ġadm iration +Ġsw ung +ens ibly +Ġr ash +ĠGo als +ĠImport ant +Sh ot +ĠR as +Ġtrain ers +ĠB un +Work ing +Ġhar med +ĠPand ora +ĠL TE +Ġmush room +ĠCH AR +ĠF ee +ĠM oy +B orn +ol iberal +ĠMart ial +Ġgentle men +Ġling ering +Offic ial +Ġgra ffiti +ĠN ames +D er +Ġqu int +ist rate +aze era +ĠNOT ICE +ĠFlore nce +Ġpay able +Ġdep icts +ĠSpe cies +He art +âĶĢâĶĢâĶĢâĶĢ âĶĢâĶĢâĶĢâĶĢ +Ġencl osed +Incre ases +D aily +ĠL is +Ġenact ment +ĠB acon +ĠSt eele +dem and +Ġ18 3 +Ġmouth s +Ġstr anded +Ġenhance ment +01 1 +ĠWh ats +Ġhe aled +en y +ĠR ab +Ġ3 40 +ĠLab yrinth +ro ach +ĠY osh +ĠCl ippers +Ġconcert s +Intern et +35 5 +Ġstick ers +Ġter med +ĠAx e +Ġgrand parents +Fr ance +ĠCl im +ĠU h +ul ic +Ġthr ill +cent ric +ĠOver view +ĠCond uct +Ġsubstant ive +Ġ18 2 +m ur +Ġstr ay +ĠCo ff +Ġrep etitive +ĠFor gotten +Ġqual ification +ew itness +ĠZ imbabwe +Ġsim ulated +ĠJ D +25 3 +ĠW are +Ġun sc +T imes +Ġsum mons +Ġdis connected +Ġ18 4 +ci us +ĠGu jar +od ka +Ġer ase +ĠTob acco +elect ed +Ġun cont +ĠShe pard +ĠL amp +Ġalert ed +Ġoper ative +arn a +u int +Ġneglig ence +ac ements +Ġsup ra +Ġprev ail +ĠSh ark +Ġbel ts +ãģ « +Ġt ighter +Engine ers +Ġin active +Ġexp onent +ĠWill ie +a ples +Ġhe ir +ĠH its +ian n +ĠS ays +Ġcurrent s +ĠBeng al +Ġar ist +B uffer +Ġbree 
ze +ĠWes ley +Col a +Ġpron oun +Ġde ed +ĠK ling +Ġof t +Ġinf lict +Ġpun ishing +Ġn m +ik u +OD UCT +01 4 +Ġsubsid y +ĠDE A +ĠHer bert +ĠJ al +B ank +Ġdef erred +Ġship ment +B ott +Ġal le +b earing +HT ML +Off line +Ġ2 13 +Ġscroll ing +Ġsc anned +ĠLib yan +ĠT OP +ch rom +d t +col umn +Psy NetMessage +Z ero +Ġtor so +0 50 +âķ IJ +Ġimp erson +ĠSchw artz +ud ic +Ġpiss ed +ĠS app +25 7 +ĠIS Ps +og l +Ġsuper vised +Ġad olescent +Ġatt ained +ĠDel ivery +ĠB unny +Ġ19 37 +Ġmini ature +Ġo s +Ġ3 70 +60 8 +ĠMour inho +Ġinn ate +Ġtem po +ĠN M +ĠFall en +00 9 +Ġprov ocative +Stream er +ĠBened ict +ĠBol she +Ġt urtle +ĠPC B +ĠEqu al +Direct or +ĠR end +Ġflu ids +Author ities +Ġcous ins +requ ency +ĠNeigh bor +s ets +sh ared +Char les +pass word +Ġg ears +Ġ2 11 +ĠHard ware +ri ka +Ġup stream +H om +Ġdisproportion ately +iv ities +Ġund efined +Ġelect rons +Ġcommem or +Event ually +Ġ> < +Ġir responsible +2 18 +ĠRe leased +ĠO VER +ĠI GN +ĠB read +st ellar +ĠS age +tt ed +dam age +ed ition +ĠPre c +Ġl ime +Ġconf inement +Ġcal orie +we apon +Ġdiff ering +ĠS ina +m ys +am d +Ġintric ate +k k +ĠP AT +ã o +st ones +lin ks +Ġr anch +Sem itic +Ġdifferent iate +ĠS inger +occup ied +Ġfort ress +c md +Ġinter ception +ĠAnk ara +Ġre pt +ĠSol itaire +Ġrem ake +p red +Ġd ared +aut ions +ĠB ACK +Run ning +Ġdebug ging +Ġgraph s +3 99 +ĠNig el +Ġb un +Ġpill ow +Ġprog ressed +fashion ed +Ġob edience +ER N +Ġrehe ars +C ell +t l +S her +Ġher ald +ĠPay ment +ĠC ory +ĠDe pt +Ġrep ent +ĠWe ak +uck land +Ġple asing +Ġshort ages +Ġjur ors +ĠK ab +q qa +Ant i +Ġw ow +ĠRC MP +Ġt sun +ĠS ic +Ġcomp rises +Ġsp ies +Ġprec inct +n u +Ġur ges +Ġtim ed +Ġstrip es +ĠB oots +Ġy en +Adv anced +Ġdisc rete +ĠArch angel +employ ment +D iff +Ġmon uments +Ġ20 9 +work er +Ġ19 6 +ĠI g +utter stock +T PS +J ac +Ġhomeless ness +Ġcomment ator +Ġrac ially +f ing +se ed +E le +ell ation +Ġeth anol +Ġpar ish +ĠD ong +ĠAw akening +Ġdev iation +ĠB earing +ĠTsu k +Ġrec ess +Ġl ymph +ĠCann abis +å ľ +ĠNEW S +Ġd ra +ĠStef an +ĠWr ong +ĠS AM +Ġloose ly +Ġinterpre ter +ĠPl ain +Go vernment +Ġbigot ry +Ġgren ades +ave z +pict ured +Ġmand ated +ĠMon k +ĠPed ro +Ġl ava +27 4 +Ġcyn ical +ĠScroll s +l ocks +M p +Ġcon gregation +orn ings +ph il +ĠI bid +Ġf erv +Ġdisapp earing +Ġarrog ant +sy n +ĠMa ver +ĠSu it +24 1 +Ġab bre +ack ers +P a +ĠY el +Whe never +Ġ23 5 +ĠV ine +ĠAn at +Ġext inct +LE T +Ġexecut able +V ERS +ox ide +D NA +ĠP rel +Ġresent ment +Ġcompr ise +ĠAv iv +Ġinter ceptions +Ġprol ific +IN A +ĠEr in +though t +2 19 +ĠPsychiat ry +un ky +chem ist +H o +ĠMcC oy +Ġbr icks +L os +ri ly +ĠUS SR +Ġr ud +Ġl aud +ĠW ise +ĠEmer ald +Ġrev ived +Ġdam ned +ĠRep air +id em +ct ica +Ġpatri arch +ĠN urs +me g +Ġcheap est +re ements +empt y +ĠCele br +Ġdepri vation +ch anted +ĠTh umbnails +E nergy +ĠEth an +ĠQ ing +Ġopp oses +W IND +v ik +ĠM au +ĠS UB +66 7 +G RE +ĠVol unte +nt on +C ook +å IJ +es que +Ġplum met +Ġsu ing +Ġpron ounce +Ġresist ing +ĠF ishing +ĠTri als +Ġy ell +Ġ3 10 +Ġin duct +Ġpersonal ized +oft en +R eb +EM BER +Ġview point +Ġexist ential +() ) +rem ove +MENT S +l asses +Ġev apor +Ġa isle +met a +Ġreflect ive +Ġentit lement +Ġdev ised +mus ic +asc ade +Ġwind ing +off set +Ġaccess ibility +ke red +Bet ter +ĠJohn ston +th inking +S now +ĠCroat ia +ĠAt omic +27 1 +34 8 +Ġtext book +ĠSix th +Ġ اÙĦ +Ġsl ider +ĠBur ger +b ol +S ync +Ġgrand children +Ġc erv ++ ) +Ġe ternity +Ġtweet ing +Ġspec ulative +Ġpiv otal +ĠW P +ĠT ER +ynam ic +Ġu pl +ĠC ats +per haps +Ġclass mates +Ġblat ant +' - +Ġl akh +ant ine +ĠB org +i om +/ ( +ĠAthlet ic +Ġs ar +OT A +ĠHoff man 
+Never theless +Ġad orable +Ġspawn ed +Ass ociated +ĠDom estic +Ġimpl ant +ĠLux em +ĠK ens +Ġp umps +ĠS AT +Att ributes +50 9 +av our +Ġcentral ized +ĠT N +Ġfresh ly +ĠA chieve +Ġouts iders +her ty +ĠRe e +ĠT owers +ĠD art +ak able +Ġm p +ĠHeaven ly +Ġr ipe +ĠCarol ine +ry an +Ġclass ics +Ġret iring +Ġ2 28 +Ġa h +Ġdeal ings +Ġpunch ing +ĠChap man +O ptions +max well +vol ume +Ġst al +Ġex ported +ĠQu ite +Ġnumer ical +B urn +F act +ĠKey stone +Ġtrend ing +Ġalter ing +ĠAfric ans +47 8 +ĠM N +ĠKn ock +Ġtempt ation +Ġprest ige +Over view +ĠTrad itional +ĠBah rain +Priv ate +ĠH OU +Ġbar r +ĠT at +C ube +US D +ĠGrand e +ĠG at +ĠFl o +Ġres ides +Ġind ec +vol ent +Ġperpet ual +ub es +Ġworld view +ĠQuant um +Ġfil tered +Ġen su +orget own +ERS ON +ĠM ild +37 9 +OT T +à ¥ +Ġvit amins +Ġrib bon +Ġsincere ly +ĠH in +Ġeight een +Ġcontradict ory +Ġgl aring +Ġexpect ancy +Ġcons pir +Ġmon strous +Ġ3 80 +re ci +Ġhand ic +Ġpump ed +Ġindic ative +Ġr app +Ġav ail +ĠLEG O +ĠMar ijuana +19 85 +ert on +Ġtwent ieth +################ ################ +ĠSw amp +Ġval uation +Ġaffili ates +adjust ed +ĠFac ility +26 2 +Ġenz ymes +itud inal +Ġimp rint +S ite +Ġinstall er +ĠT RA +m ology +lin ear +ĠCollect ive +ig ating +ĠT oken +Ġspec ulated +K N +ĠC ly +or ity +Ġdef er +Ġinspect ors +appro ved +R M +ĠSun s +Ġinform ing +ĠSy racuse +ib li +7 65 +Ġgl ove +Ġauthor ize +âĢ¦âĢ¦âĢ¦âĢ¦ âĢ¦âĢ¦âĢ¦âĢ¦ +ĠCru ise +Ġcontract ing +she ll +IF E +ĠJew el +p ract +ĠPhot oshop +ĠKnow ing +h arm +Ġattract ions +ad an +et us +01 8 +w agen +Al t +Ġmultip ly +Ġequ ilibrium +: { +ĠF ighters +ĠEd gar +Ġfour teen +Go vern +Ġmis use +Ġab using +Ġancest ry +ram er +64 4 +Ġwor ms +Ġthick er +ĠComb ine +Ġpeas ants +Ġv ind +Ġcon quest +Ġm ocked +Ġc innamon +ĠC ald +ĠGall up +Ġavoid ance +Ġincarn ation +ĠStr at +Ġt asted +ent a +ĠN eal +p ared +Ġtermin ology +ject ion +Scient ists +ĠIN S +ĠDe e +Ġdirect ories +R oad +ĠSh ap +br ight +ĠDirect ors +ĠCol umn +Ġb ob +Ġprefer ably +Ġgl itch +f urt +Ġe g +id is +C BC +Ġsur rendered +Ġtest ament +33 6 +ug gest +ĠN il +an other +Ġpat hetic +ĠDon na +Ġ2 18 +ĠA very +Ġwhis key +Ġf ixture +ĠCon quest +Ġbet s +O cc +ĠLe icester +] ." 
+Ġ) ); +Ġfl ashes +45 6 +Ġmask ed +ge bra +Ġcomput ed +che l +aud er +Ġdefe ats +ĠLiber ation +ĠOs ama +ĠV ive +Ch anges +Ch annel +Ġtar iffs +Ġm age +ĠS ax +Ġinadvert ently +ĠC RE +ĠRe aper +ink y +gr ading +Ġstere otyp +Ġcur l +ĠF ANT +Ġfram eworks +M om +ĠAn ch +Ġflav our +car bon +Ġperm itting +let cher +ĠMo zilla +ĠPark ing +ĠCh amp +Sc roll +Ġmurd erer +Ġrest ed +Ġow es +ĠP oss +AD D +IF F +res olution +ĠMin ing +Ġcompar ative +D im +Ġneighbour ing +ĠA ST +ĠT oxic +Ġbi ases +Ġgun fire +ur ous +ĠMom ent +19 83 +Ġper vasive +tt p +ĠNorm ally +r ir +S arah +ĠAlb any +Ġun sett +ĠS MS +ip ers +l ayer +ĠWh ites +up le +Ġtur bo +ĠLe eds +Ġthat s +ĠMin er +M ER +ĠRe ign +Ġper me +ĠBl itz +Ġ19 34 +Ġintimid ating +t ube +Ġecc entric +ab olic +box es +ĠAssoci ates +v otes +Ġsim ulate +um bo +aster y +Ġship ments +FF FF +an th +Ġseason ed +Ġexperiment ation +âĸ ł +law s +Me et +idd les +ant ics +R ating +IS IS +h ift +Ġfront s +b uf +01 7 +Ġun att +ĠD il +le ases +ĠGard ens +77 7 +t ouch +ve ll +45 8 +Ġ= ==== +s aving +Ġer osion +ĠQu in +Ġearn s +Ġaccomplish ment +ĠWe i +Ġ< [ +____ _ +Ġir rig +ĠT eddy +Ġconqu ered +ĠArm ored +Ġassert s +Ġmanip ulating +r é +Ġtranscript s +G allery +Ġplot ting +Ne il +Ġbetray al +load er +ĠS ul +Ġdispl acement +Ġroy alty +ĠW I +he it +ĠDev ices +alle l +Ġmunicipal ities +Ġcan al +St ars +ĠU AE +Ġ" âĢ¦ +ĠC U +ab ove +Ġreson ance +ĠguiActive Un +add ed +ĠBra ves +ĠI bn +Ġhere by +ĠB RE +Ġshare holder +ĠH ir +ĠJ i +Ġstrange ly +Ġadm ired +Ġpl ight +Ġb achelor +ĠP ole +cipl inary +T ony +ĠArmen ian +Ġun man +ĠZion ist +St age +isco ver +Ġautom otive +Ġs idelines +Ġsl ick +ĠRena issance +ĠF UN +Im ages +ĠH aj +Ġp ing +Ġshort cut +ĠBl vd +ĠLook s +Ġbur sts +Ġcl amp +Ġm ish +Ġsort ing +Ġpatri ot +Ġcorrect ness +ĠScand inav +ĠCaval iers +p ython +az ar +Ġ3 75 +ĠJa une +40 9 +Ġdetrim ental +Ġstab bing +Ġpoison ed +Ġf ountain +oc ent +or st +ĠMar i +Ġr ains +ĠO vers +ĠInst itution +ud get +AM Y +t ale +ĠK R +ĠPr ices +Ġhead aches +Ġlands l +ĠA ura +Bon us +ĠZ hao +ĠH ip +Ġhop s +ĠKurd istan +Ġexplo iting +ry n +Ġhypocr isy +op ening +Ġgun shot +Ġw ed +inter stitial +Inter stitial +Ġam en +Bre aking +Ġmarket ed +W ire +ĠC rowd +Contin ue +ĠK nown +ĠEffect ive +ore an +iz ons +Jose ph +Ġescal ation +us ername +Ġcur tain +AT ES +ĠP AR +ĠM iy +Ġcounter fe +l ene +Ġcont enders +d aily +ĠAs c +ĠPhill ip +most ly +Ġfil ename +he ne +Ġresemb ling +Ġst aging +ĠCh loe +Ġw iring +H on +ĠRen ew +ott age +ĠHy brid +m uch +Ġstro kes +Ġpolicy makers +AP TER +ĠArk ham +pl ot +Ġassist ants +Ġde port +ĠSe ga +Ġinflu enza +ĠC ursed +ĠK obe +Ġskin ny +Prov ider +ĠR ip +Ġincrement al +product s +B F +Ġd ome +ĠC redits +Ġlos ers +int s +ĠBet ty +ĠTal ent +ĠD AM +L v +E ss +Ġd ens +tem p +J udge +od ic +Ġ' ( +UR ES +ets k +V O +Ġretrie ved +Ġarchitect s +Ù ĩ +Ġeth ic +ĠSecond ary +st ocks +ad ia +Ġ3 25 +ĠOp inion +Ġsimultane ous +Ġd izz +ul p +Ġsmugg ling +ipp ery +R andom +f acing +ĠD as +Ġstock p +Ġdiscl osures +po inter +Ġcor al +ĠSe lection +ĠP ike +ival ent +Ġruth less +ĠR im +Ġensu ing +ĠExper iment +Ġcongress man +Ġbelie ver +Ġun specified +ĠM ord +Ġknowledge able +ĠV ERY +T X +Ġstra ps +Ġtur f +apesh ifter +Ġmar ital +Ġfl ock +ãģ Ĩ +26 3 +AM ES +ĠOpp osition +Ġtre asures +ĠG OD +Ġmodel ed +ĠWOR LD +Ġ( [ +ĠUs age +H F +Ġ$ ( +uss ed +Ġpione er +E ight +par se +b read +rit z +ĠMir anda +ĠK ant +++ ) +ore n +Ġprov oked +Ġbre eds +ĠIn cludes +ĠPast ebin +ĠFl ip +J ava +Ġbr ink +Ġrum ored +Ġun seen +Ġgar nered +ĠDef in +al ted +Ġtatt oos +Ġhes itation +is itions +ĠWe aver +ĠReport ing +Ġtherap 
ies +Ġconsult ants +Ġresid ual +ĠMal i +ĠRom a +i ago +ĠRes idents +ub i +Ġremed ies +Ġadapt ive +ĠAl ive +ĠBar cl +Ġwal lets +c rypt +etermin ation +ĠPel osi +Ġsl ipping +oton in +Ġall iances +pat rick +ir is +Ġor th +ĠPer kins +ĠDe V +ĠG ets +Ġdry ing +ge e +fore st +ĠFor get +ore m +33 9 +Ġvague ly +ĠD ion +ĠP orn +ĠH OW +Ġp neum +Ġrub ble +ĠT aste +enc ia +ĠG el +Ġd st +Ġ24 5 +ĠMoroc co +inf lamm +ĠTw ins +Ġb ots +d aughter +ĠB alk +Ġbre thren +Ġlog os +Ġgo bl +f ps +Ġsub division +Ġp awn +Ġsquee zed +Ġmor ale +ĠD W +' " +Ġkn ot +ook y +Ġdiv isive +Ġboost ed +ch y +ãĥ IJ +if act +Ġnewcom ers +ĠWrest ling +Ġsc outs +w olves +R at +Ġnin eteenth +ĠOs borne +St ats +Ġem powered +Ġpsych opath +ĠO EM +ugg age +ĠP K +ĠMoh ammad +P ak +Ġanarch ists +ĠExt ract +est hes +ĠStock holm +l oo +ĠG raph +Ġdeploy ing +ĠStr anger +ĠM old +Ġstaff er +Ġdiscount ed +uck le +ple ase +ĠLand ing +ÃŃ a +Ġ19 3 +Ġan te +Ġrep etition +Ġ+ /- +Ġpar ody +Ġlive ly +AA A +ĠHor us +Ġp its +ind ers +L OC +ĠVen ice +40 6 +ĠDis cover +â Ĩ +ellect ual +Ġp ens +Ġey el +ig uous +Im pl +Ġj oking +Ġinv al +ĠBel fast +Ġcredit ors +ĠSky walker +ov sky +Ġcease fire +Ġse als +is oft +) ). +ĠFel ix +IT S +Ġt resp +ĠBlock chain +ew are +ĠSch war +en ne +mount ed +ĠBe acon +les h +Ġimmense ly +Ġche ering +Em ploy +sc ene +ish ly +atche wan +ĠNic olas +Ġdr ained +ĠEx it +ĠAz erb +j un +Ġflo ated +u ania +De ep +Ġsuper v +Ġmyst ical +ĠD ollar +ĠApost le +ĠR EL +ĠProv ided +ĠB ucks +ãĥ ´ +cut ting +Ġenhance ments +ĠPengu ins +ĠIsa iah +Ġj erk +ĠW yn +Ġst alled +Ġcryptoc urrencies +ĠR oland +sing le +Ġl umin +ĠF ellow +ĠCap acity +ĠKaz akh +W N +Ġfin anced +38 9 +Ġt id +Ġcoll usion +ĠMy r +î Ģ +Sen ator +Ġped iatric +Ġneat ly +Ġsandwic hes +ĠArchitect ure +Ġt ucked +Ġbalcon y +Ġearthqu akes +qu ire +F uture +Ġhe fty +é Ĺ +Ġspecial izes +Ġstress es +Ġs ender +Ġmisunder standing +Ġep ile +Ġprov oke +ĠCol ors +Ġdis may +uk o +[ _ +58 6 +ne utral +Ġdon ating +ĠRand all +Mult i +Ġconvenient ly +ĠS ung +ĠC oca +Ġt ents +ĠAc celer +Ġpart nered +27 2 +ir ming +ĠB AS +s ometimes +Ġobject ed +ub ric +p osed +LC S +gr ass +Ġattribut able +V IS +Israel i +Ġrepe ats +ĠR M +v ag +ut a +in ous +Ġin ert +ĠMig uel +æ Ń +ĠHawai ian +B oard +Ġart ific +ĠAzerb ai +as io +ĠR ent +A IN +Ġappl iances +Ġnational ity +Ġass hole +ĠN eb +Ġnot ch +h ani +ĠBr ide +Av ailability +Ġintercept ed +Ġcontin ental +Ġsw elling +ĠPers pect +b ies +. < +ith metic +ĠL ara +Ġtempt ing +add r +Ġoversee ing +cl ad +ĠD V +ĠGing rich +Ġm un +ĠApp ropri +Ġalter ations +ĠPat reon +Ġha voc +Ġdiscipl ines +Ġnotor iously +aku ya +ier i +? ). 
+ĠW ent +Ġsil icon +Ġtre mb +Cont ainer +K nown +Ġmort ar +est e +ick a +Ar thur +ĠPre viously +ĠMart y +Ġsp arse +g ins +Ġin ward +ĠParticip ant +C opy +ĠM isc +Ġantib iotic +ĠRet ro +Ġel usive +Ġass ail +ĠBatt alion +ĠB ought +Ġdimin ish +ĠEuro pa +s ession +ĠDanger ous +ies el +Ġdisbel ief +Ġbl asts +ext reme +ĠBoy d +ĠProject s +ĠGu ys +Ġunder gone +Ġgr ill +ĠDw ight +Ġ19 7 +US ER +Ġfiles ystem +Ġcl ocks +T aylor +Ġwra pper +Ġfold ing +ous and +ĠPhilipp ine +ATION AL +ĠPer th +Ġas hes +Ġaccum ulate +ĠGate way +Sh op +orks hire +H an +ĠBar rel +ĠLe h +ĠX V +Ġwh im +Ġrep o +ĠC G +ĠM am +Ġincorpor ating +Ġbail out +Ġlingu istic +Ġdis integ +C LE +Ġcinem atic +ĠF iber +S yn +il ion +ĠCom pos +c hens +Ġne oc +Ġbo iled +F INE +on o +un cle +ik en +ĠB M +Î ¹ +Ġreceipt s +Ġdisp osed +ĠTh irty +ĠR ough +ĠA BS +Ġnot withstanding +oll en +# $ +Ġunrel iable +Ġbl oom +Ġmedi ocre +Ġtr am +ĠTas man +Ġsh akes +Ġmanifest o +ĠM W +Ġsatisf actory +Ġsh ores +Ġcomput ation +Ġassert ions +orm ons +ar ag +ab it +Dem ocrats +ĠL oot +ĠVol ks +ha ired +Ġgrav itational +S ing +ĠM iz +Ġthro ttle +Ġtyr anny +ĠView s +Ġrob ber +ĠMinor ity +Ġsh rine +sc ope +pur pose +Ġnucle us +our cing +ĠUS DA +ĠD HS +w ra +ĠBow ie +Sc ale +ĠB EL +x i +I ter +Ġ( ), +w right +Ġsail ors +ous ed +NAS A +ĠPro of +ĠMin eral +t oken +ĠF D +R ew +Ġe ll +6 30 +Ġchance llor +ĠG os +Ġamount ed +ĠRec re +ome z +ĠOpt im +ĠOl ive +Ġtrack er +ow ler +ĠUn ique +R oot +Ġmar itime +ĠQur an +ĠAd apt +Ġecosystem s +ĠRe peat +ĠS oy +ĠI MP +Ġgrad uating +and em +P ur +ĠRes et +ĠTr ick +ĠPh illy +ĠT ue +ĠMalays ian +Ġclim ax +Ġb ury +Ġcons pic +ĠSouth ampton +ĠFl owers +Ġesc orted +ĠEduc ational +ĠI RC +Ġbrut ally +e ating +Ġpill ar +ĠS ang +ĠJ ude +ar ling +ĠAm nesty +Ġrem inding +ĠAdminist rative +hes da +Ġfl ashed +ĠP BS +per ate +fe ature +Ġsw ipe +Ġgra ves +oult ry +26 1 +bre aks +ĠGu er +Ġsh rimp +ĠV oting +qu ist +Ġanaly tical +Ġtables poons +ĠS OU +Ġresear ched +Ġdisrupt ed +Ġj our +Ġrepl ica +Ġcart oons +b ians +} ) +c opy +G ot +ou ched +P UT +Ġsw arm +not ations +s aid +Ġreb uilt +Ġcollabor ate +Ġr aging +Ġn ar +Ġdem ographics +ĠD DR +Ġdist rust +oss ier +ĠK ro +Ġpump kin +Ġreg rets +Ġfatal ities +ĠL ens +ĠO le +p d +Ġpupp et +ĠOut look +ĠSt am +O l +F air +U U +Ġre written +Ä ± +Ġfasc inated +Ġve ctors +Ġtrib unal +u ay +ĠM ats +ĠCo ins +[ [ +Ġ18 1 +Ġrend ers +ĠK aepernick +Ġesp ionage +Ġsum m +Ġd itch +Acc ount +Ġspread sheet +Ġmut ant +p ast +40 7 +Ġd ye +Ġinit iation +Ġ4 000 +Ġpunish able +Ġth inner +ĠKh al +Ġinter medi +D un +ĠGoth am +Ġeager ly +Ġvag inal +p owers +V W +ĠWATCH ED +Ġpred ator +ams ung +Ġdispar ity +Ġ[ * +Ġam ph +Ġout skirts +ĠSpir its +Ġskelet al +Ð » +ĠR ear +Ġissu ance +ĠLog ic +re leased +Z Z +ĠB ound +Ent ry +Ġex its +is ol +ĠFound er +Ġw re +ĠGreen land +ĠM MO +t aker +IN C +ãģ ¾ +Ġhour ly +hen ko +Ġfantas ies +Ġdis ob +Ġdemol ition +ãĥ ĭ +Ġen listed +rat ulations +Ġmis guided +Ġens ured +Ġdiscour aged +m ort +Ġfl ank +Ġc ess +Ġreact s +ĠS ere +s ensitive +ĠSer pent +ass ad +Ġ24 7 +Ġcalm ly +b usters +Ġble ed +ĠSt ro +Ġamuse ment +ĠAntar ctica +Ġs cept +ĠG aw +a q +ason ic +Ġsp rawling +n ative +atur ated +ĠBattle field +IV ERS +E B +ĠG ems +ĠNorth western +ĠFil ms +ĠAut omatic +Ġappre hend +ãģ ¨ +Ġgui Name +Ġback end +Ġevid enced +ge ant +01 2 +ĠS iege +Ġexternal To +Ġunfocused Range +ĠguiActiveUn focused +Ġgui Icon +ĠexternalTo EVA +ĠexternalToEVA Only +F ri +ch ard +en aries +Ġchief s +Ġc f +ĠH UD +Ġcorro bor +Ġd B +ĠT aken +ĠPat ricia +ra il +ĠCh arm +ĠLiber tarian +rie ve +Person al +ĠO UR +ger ies +Ġdump ing 
+Ġneurolog ical +it imate +ĠClint ons +raft ed +ĠM olly +Ġtermin als +reg ister +Ġfl are +Ġenc oded +Ġautop sy +p el +m achine +Ġexempt ions +ĠRoy als +d istance +Ġdraft s +Ġl ame +ĠC unning +Ġsp ouses +ĠMark ets +ĠCar rier +Ġimp lying +ĠY ak +s id +Ġl oser +Ġvigil ant +Ġimpe achment +Ġaug mented +ĠEmploy ees +Ġunint ended +tern ally +ĠW att +Ġrecogn izable +ess im +æ Ŀ +Ġco ated +r ha +Ġlie utenant +ĠLegisl ation +pub lished +44 4 +01 3 +Ġide ally +ĠPass word +Ġsimpl ify +ĠMet a +ĠM RI +Ġple ading +organ ized +hand ler +Ġun ravel +cor rect +Ġ icy +Ġparan oid +Ġpass er +Ġinspect ions +of er +ĠHealth care +28 3 +ĠBr ut +iol a +for ge +ĠMed ieval +MS N +ie vers +ĠProgram ming +å ī +Ġ2 23 +m u +ĠC LE +ug a +Ġsho ppers +Ġinform ative +ĠPl ans +Ġsupplement ation +ĠT ests +ty ard +ocy tes +ĠVeg a +ĠGujar at +erman ent +Ex cept +ĠL OT +all a +ĠC umm +ĠO sw +Ġven om +ĠDeb t +ĠD OWN +Ġreun ion +Ġm uc +ĠRel ief +Ġge op +ĠðŁ ĺ +al ogue +An th +ech o +Ġcor ros +Ġrepl ication +ĠBl azing +ĠD aughter +Ġinf lic +ĠLind sey +Ù Ī +28 4 +Ex it +Ġgl oom +TA IN +Ġundermin ing +Ġadv ising +h idden +Ġover flow +Ġg or +urd ue +Ġe choes +enh agen +Ġimp uls +d rug +c ash +Ġas ync +Ġmir ac +at ts +p unk +Ġpiv ot +ĠLegisl ative +Ġblog gers +ĠCl aw +s burg +d yl +ĠRecomm end +Ġver te +Ġprohib iting +ĠPant her +Jon athan +Ġo min +Ġhate ful +28 1 +ĠOr che +ĠMurd och +down s +Ġas ymm +G ER +Al ways +Ġinform s +ĠW M +ĠP ony +ĠApp endix +ĠAr lington +J am +Ġmedic inal +ĠS lam +IT IES +Ġre aff +ĠR i +F G +S pring +b ool +Ġthigh s +Ġmark ings +ĠRa qqa +ĠL ak +p oll +ts ky +ĠMort y +ĠDef inition +Ġdeb unk +end ered +ĠLe one +a vers +Ġmortg ages +App arently +N ic +ha us +ĠTh ousands +au ld +Ġm ash +sh oot +Ġdi arr +Ġconscious ly +H ero +e as +ĠN aturally +ĠDestroy er +Ġdash board +serv ices +R og +Ġmillenn ials +Ġinv ade +- ( +Ġcomm issions +ĠA uckland +Ġbroadcast s +Ġfront al +Ġcr ank +ĠHist oric +Ġrum ours +CT V +Ġster il +Ġboost er +rock et +ãĤ ¼ +ut sche +ĠP I +Ġ2 33 +ĠProdu cer +ĠAnaly tics +Ġinval uable +Ġunint ention +ĠC Y +Ġscrut in +Ġg igg +Ġeng ulf +Ġprolet ariat +Ġh acks +ĠH ew +ar ak +ĠSl ime +ield ing +ag her +ĠEll iot +Ġtele com +Ġ2 19 +ult an +ĠAr bor +ĠSc outs +B an +Ġlifes pan +Ġbl asp +38 8 +Ġjud iciary +ĠContin ental +ask ing +Mc C +L ED +Ġbag gage +ĠSorce rer +Ġrem nants +ĠGriff ith +ets u +ĠSub aru +ĠPerson ality +des igned +ush ima +agn ar +Ġrec oil +Ġpass ions +\ ": +Ġte e +Ġabol ition +ĠCreat ing +j ac +Ġ19 4 +01 9 +Ġpill ars +ric hed +/ " +t k +Ġlive lihood +Ġro asted +ah on +ĠH utch +ass ert +Ġdivid end +Ġkn it +Ġd aunting +Ġdisturb ance +Ġsh ale +Ġcultiv ated +Ġrefriger ator +L B +ĠN ET +Ġcommercial s +Ġthink ers +45 5 +Ġch op +B road +Ġsuspic ions +Ġtag ged +l ifting +Ġsty lish +ĠShield s +Short ly +Ġt ails +A uth +ST E +ĠG AME +Ġse ism +ĠK is +olog ne +Ġcow ork +Ġforc ibly +Ġthy roid +ĠP B +AN E +mar ried +h orse +Ġpoly mer +ĠCh al +od or +DE BUG +ĠCon text +Ġbl iss +Ġpin point +ĠMat hemat +leg ram +ĠWeek end +Ġlab elled +Ġb art +it les +Ġest rogen +âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ +" ' +Ġvis ibly +Ġouts ider +aid a +Are a +Ġdisse min +Ġdish onest +ĠCl osed +ĠBullet in +ĠRam sey +sw ord +ĠX I +our ced +S ame +34 6 +ĠRe pe +ĠK ou +c ake +em is +C ache +ĠMe aning +ĠEn light +onom y +Ġmanifest ation +sw orth +J ay +Ġch ore +ö r +D ream +Ġsanction ed +Ġcult urally +ĠA ra +N av +Ġthe ological +Ġstr ut +ĠV O +ĠHand book +Ġconstruct ing +Ġ ¶ +ĠBenef its +ĠPsych ological +s ac +å ¸ +p olicy +ĠMat ters +ĠReport ed +ĠBy te +Ġvit ro +ĠM aiden +Ġl am +ĠJenn ings +Ġgar ment +ĠRut gers +ĠStaff 
ord +ĠWell ington +Ġinter mitt +Ġn pm +Ġord eal +Ġplug ged +o oming +in ished +fram ework +Ġtim ber +Ġc ass +Ġ8 50 +il ess +ĠRed ux +7 68 +St re +Ġsurpass ed +w hel +Ġparalle ls +Ġve il +ĠG I +ĠR EST +Ġread iness +s ort +Ġmod ifying +ĠSl ate +ru ff +Ġmar ble +Ġinf rared +Ġaud itor +ĠFANT ASY +ĠP overty +ĠS PD +Ġ" ( +K y +RA Y +Ġexecut ions +ĠBever ly +ĠMarx ism +ĠBur st +ĠK ali +est ones +Clear ly +E ll +ãģ § +ĠProceed ings +T oken +IF IC +ñ a +Cent ral +ĠH aley +ĠD rama +Ġform ations +OR N +Book s +Ġdom inating +ĠFly ers +ĠCompan ion +Ġdiscipl ined +ĠYug oslav +ĠSpell s +Ġv engeance +Ġland lords +L en +ĠO gre +ano ia +Ġpier cing +Ġcon greg +Ġscore r +ob ia +Ġnic kel +ĠLear ns +Ġre jo +Ġmaster piece +Fl ash +Ġinhab ited +ĠOpen GL +ĠD ud +ĠI CO +Ġar ter +Ġpl ur +Ġmaster y +Ġlong standing +st ed +Ġw ines +Ġtelev ised +ĠSh rine +ĠBay ern +Ġâ ĵĺ +Ġencl osure +j ohn +Ġprophe ts +ĠRes urrection +ĠOrd ers +Ġun even +r als +Ġd wind +ĠL ah +ĠSl oven +37 8 +Ġins istence +aff le +ĠCl one +Ġhard ship +ĠCongress man +Ġple ad +Ġreview ers +Ġc ured +Ġ19 35 +as ley +f ake +ĠTh inking +yd ia +P ART +ĠD ota +o it +Ġwh ipped +Ġb ouncing +ĠHispan ics +com ings +Ġcann abin +ĠCh ambers +ĠZ ack +Option al +Ġco ats +Ġprow ess +ĠNort on +Ġplain ly +Ġfre ight +Ġinhib ition +Ġcl am +Ġ30 3 +ke f +ale igh +L uke +Ġpsych o +ator ium +M ED +Ġtreat ies +Ġind isc +Ġd c +OP S +Ġresil ient +ĠInter state +Ġsl ack +Ġmund ane +Ġestab lishes +35 9 +Ġstr ained +Ġn ond +S us +Ġcast e +ar ate +ie ving +Ġunfair ly +Ġpars er +on ial +urs ive +V ia +ĠOtt o +ĠAuthor ities +stro ke +K R +ĠMer cy +Ġfurn ished +Ġout set +Ġmet ic +19 82 +olith ic +ĠT ent +og ical +ĠA ircraft +Ġh ides +ĠBec ame +Ġeduc ators +re aching +Ġvol atility +Ġtodd ler +ĠNAS CAR +ĠTw elve +ĠHigh lights +Ġgra pe +Ġspl its +Ġpe asant +Ġre neg +ĠMS I +Tem p +st ars +Ġtre k +ĠHy de +b inding +Ġreal ism +Ġox ide +ĠH os +Ġmount s +Ġbit ing +Ġcollaps ing +Ġpost al +Ġmuse ums +Ġdet ached +Ġrespect ing +Ġmonop ol +Ġwork flow +ĠC ake +Tem plate +ĠOrgan isation +Ġpers istence +36 9 +C oming +B rad +Ġredund ant +ĠG TA +Ġb ending +Ġrev oked +Ġoff ending +Ġfram ing +Ġprint f +Comm un +mem bers +Out side +Ġconst rued +Ġc oded +F ORE +Ġch ast +Ch at +Ind ian +ĠY ard +? !" +ĠP orts +ĠX avier +ĠR ET +' ." +ĠBo at +iv ated +ich t +umer able +D s +ĠDun n +Ġcoff in +Ġsecure ly +ĠRapt ors +ĠB es +Install ation +Ġin ception +ĠHealth y +end ants +Ġpsych ologists +ĠShe ikh +c ultural +ĠBlack Berry +sh ift +F red +oc he +Ġc akes +ĠS EO +ĠG ian +ĠAs ians +og ging +e lement +Ġpund its +ĠV augh +ĠG avin +Ġh itter +Ġdrown ed +Ġch alk +ĠZ ika +Ġmeas les +80 2 +âĢ¦ .. 
+ĠAW S +] " +Ġdist ort +ĠM ast +Ġantib odies +ĠM ash +Mem ory +ĠUg anda +ĠPro b +Ġvom iting +ĠTurn s +Ġoccup ying +Ġev asion +ĠTher apy +Ġprom o +Ġelect r +Ġblue print +ĠD re +pr iced +ĠDep ot +Ġallev iate +ĠSom ali +m arg +n ine +Ġnostalg ia +ĠShe pherd +Ġcaval ry +Ġtor ped +ĠBlood y +x b +Ġs ank +Ġgo alt +report print +embed reportprint +clone embedreportprint +ĠIn itially +ĠF ischer +Ġnot eworthy +c ern +Ġin efficient +raw download +rawdownload cloneembedreportprint +c ation +ĠD ynasty +l ag +D ES +Ġdistinct ly +ĠEston ia +Ġopen ness +Ġg ossip +ru ck +W idth +ĠIb rahim +Ġpet roleum +Ġav atar +ĠH ed +ath a +ĠHog warts +Ġc aves +67 8 +Ġsafegu ard +ĠM og +iss on +ĠDur ham +sl aught +ĠGrad uate +Ġsub conscious +ĠEx cellent +ĠD um +---- - +Ġp iles +ĠW ORK +ĠG arn +ĠF ol +ĠAT M +Ġavoid s +ĠT ul +Ġble ak +EL Y +iv ist +light ly +P ers +ĠD ob +ĠL S +Ġins anity +Î µ +atal ie +En large +Ġtw ists +Ġfault y +Ġpir acy +Ġimp over +Ġrug ged +ĠF ashion +Ġs ands +' ? +sw ick +Ġn atives +Ġhe n +ĠNo ise +ãĥ Ĺ +Ġg reens +Ġfree zer +Ġd ynasty +ĠFather s +ĠNew ark +Ġarchae ological +Ġo t +ob ar +Ġblock ade +Ġall erg +L V +Ġdeb it +ĠR FC +ĠMil ton +ĠPress ure +Ġwill ingly +Ġdisproportion ate +Ġopp ressive +Ġdiamond s +Ġbelong ings +19 70 +Ġbell s +Ġimperial ism +Ġ2 27 +Ġexpl oding +ĠE clipse +Ġ19 19 +Ġr ant +Ġnom inations +34 7 +Ġpeace fully +ric a +ĠF UCK +Ġvib ration +mal ink +Ġro pes +ĠIv anka +ĠBrew ery +ĠBook er +ĠOw ens +go ers +Serv ices +ĠSn ape +Ġ19 1 +39 5 +Ġ2 99 +just ice +Ġb ri +Ġdisc s +Ġprom inently +Ġvul gar +Ġsk ipping +l ves +Ġtsun ami +37 4 +ĠU rug +ĠE id +rec ated +p hen +Ġfault s +ĠStart ed +9 50 +Ġp i +Ġdetect or +Ġbast ard +Ġvalid ated +Space Engineers +OUR CE +Ġ( ~ +Ġuns ur +Ġaff irmed +Ġfasc ism +Ġres olving +ĠCh avez +ĠC yn +Ġdet ract +L ost +Ġrig ged +Ġhom age +ĠBrun o +55 5 +ec a +Ġpress es +Ġhum our +Ġsp acing +Ġ' / +olk ien +C oun +OP ER +T re +S on +ĠCambod ia +ier re +m ong +o zy +Ġliquid ity +ĠSov iets +ĠFernand o +Ġ2 29 +Ġsl ug +ĠCatal an +elect ric +Ġsc enery +ĠH earth +Ġconst rained +Ġgoal ie +ĠGu idelines +ĠAm mo +ĠPear son +Ġtax ed +Ġfet us +Resp onse +ĠAlex is +th ia +G uy +Ġrecon struct +Ġextrem es +Ġconclud ing +ĠP eg +ook s +Ġded uctions +R ose +Ġground breaking +ĠT arg +ãĥ ģ +ĠRe ve +res ource +Ġmo ons +Ġelectrom agnetic +Ġamid st +ĠVik tor +N ESS +B ACK +Ġcomm ute +ĠAna heim +Ġfluct uations +6 40 +Ġnood les +ĠCop enhagen +ĠT ide +ĠGri zz +ĠS EE +Ġpip elines +Ġsc ars +end o +ag us +ĠE TF +/ # +ĠBec ome +44 8 +Ġvis c +ĠRecomm ended +Ġj umper +Ġcogn ition +Ġassass in +Ġwitness ing +ĠSet up +Ġl ac +v im +IS M +p ages +SS L +35 8 +Ġad ject +indust rial +l ore +cher y +Ġgl itter +Ġc alf +Flor ida +Ġspoil ers +Ġsucceed s +Ġch anting +Ġslog ans +ĠTr acy +Vis it +rol ogy +Ġm ornings +Ġline age +Ġs ip +Ġintense ly +Ġflour ish +ĠSle eping +ĠF em +or por +ĠK lan +ĠDar th +h ack +ĠNi elsen +Ġtum ors +Ġprocure ment +ĠY orkshire +Ġra ided +K Y +An na +Ġ// [ +ĠDis order +ĠMust ang +ĠW en +ĠTry ing +s q +Ġdeliver ies +Ġshut ter +Ġcere bral +Ġbip olar +ĠC N +l ass +j et +Ġdeb ating +> : +Ġe agle +gr ades +ĠD ixon +UG C +M AS +ĠDr aco +ĠMach ines +aff er +Ġem an + ² +pr on +ĠG ym +Ġcompar atively +ĠTrib unal +PR O +Ġle x +Ġfert ile +Ġdep ressing +Ġsuperf icial +ess ential +ĠHun ters +g p +Ġprom inence +L iber +ĠAn cest +ote chnology +Ġm ocking +ĠTra ff +ĸ ļ +Med ium +I raq +Ġpsychiat rist +Quant ity +ĠL ect +Ġno isy +5 20 +G Y +Ġsl apped +ĠM TV +Ġpar a +p ull +Mult iple +as her +Ġn our +ĠSe g +Spe ll +v ous +ord ial +Sen ior +ĠGold berg +ĠPl asma +ne ed +Ġmess enger +ere t +Ġteam ed 
+Ġliter acy +ĠLe ah +ĠD oyle +Ġem itted +U X +Ġev ade +Ġm aze +Ġwrong ly +ĠL ars +Ġstere otype +Ġpled ges +Ġarom a +ĠM ET +Ġac re +ĠO D +Ġf f +Ġbrew eries +ĠH ilton +und le +ĠK ak +ĠThank fully +ĠCan ucks +in ctions +ĠApp ears +Ġco er +Ġundermin ed +ro vers +And re +Ġbl aze +um ers +Ġfam ine +amp hetamine +ulk an +Am ount +Ġdesper ation +wik ipedia +develop ment +ĠCor inth +uss ia +Jack son +L I +N ative +R s +Oh io +ĠKath leen +F ortunately +Ġattend ant +ĠPre ferred +ĠDid n +ĠV s +M is +Ġrespond ent +Ġb oun +st able +Ġp aved +Ġunex pl +ĠChe ney +L M +ĠC ull +bl own +Ġconfront ing +oc ese +serv ing +W i +ĠLith uania +ann i +Ġst alk +h d +Ġv ener +AP H +ynchron ous +UR R +um ably +hist oric +H alf +H ay +Ġresil ience +spe ction +Ġabandon ing +O bs +ĠDeb bie +Ġgrad ient +ĠPl aint +ĠCan al +AR CH +Ġexpans ive +Ġfun g +Ġb ounced +U nd +Ġprec autions +Ġclar ification +Ġd agger +Ġgri ps +Ġ µ +ĠRiver a +ĠUnd ead +is ites +ĠFIR ST +ñ o +aud i +Ġhost ages +Ġcompl iant +Ġal umni +Se ven +Ġcyber security +e ither +Col lect +Ġinvari ably +ĠS oci +Ġlaw maker +Ġa le +ĠPerson ally +N azi +Ġcustom ization +ĠPro c +ĠSask atchewan +eat uring +Ġsp ared +Ġdiscontin ued +Ġcomput ational +ĠMotor ola +Ġsuprem acist +government al +Ġparad ise +ĠDown ing +ĠNik on +Ġcat alyst +ber ra +Tor onto +8 75 +bet a +ĠMac ron +Ġunreal istic +ve ctor +ĠVeh icles +it iveness +ĠR V +ĠCol bert +s in +o ji +ent in +ĠKr ish +hell o +ff ield +ok y +ĠT ate +Ġmap le +Ġa ids +chem ical +33 4 +n uts +ĠWar p +Ġx x +ĠRob b +umer ous +_- _ +ft ime +ĠV W +Ġw inger +ĠD ome +t ools +ĠP V +ĠGe orgetown +Ġg eared +Ġjihad ists +Ġc p +Ġster oids +M other +cler osis +ĠDR M +nes ia +Ġl inger +Ġimm ersive +ĠC OUN +Ġoutwe igh +ens ual +B and +Ġtransform s +mat ched +ps ons +ĠJud icial +f actor +Ġrefer ral +Ġodd ly +ĠW enger +B ring +ĠB ows +60 2 +IC LE +Ġl ions +ĠAcad emic +ĠTh orn +ĠRa ider +kef eller +St orage +L ower +ĠOr t +ĠEqu ality +AL T +ĠS OC +T ypes +Ġl yn +ĠAss et +co at +TP P +C VE +ĠPione er +app lication +Mod ern +ĠH K +En vironment +Al right +R ain +IP P +ĠShi ite +Ġm ound +ĠAb ilities +cond ition +St aff +Ġcompet ence +ĠM oor +ĠDi ablo +Ġwith held +Ġost ensibly +ĠB rom +Ġms g +Ġden omin +ĠRef erences +ĠF P +Ġplun ged +Ġp amph +m oving +cent ral +Ġdown right +Ġf ading +T al +T yp +ĠTh y +uk es +it he +Ġo ve +Ġbatt led +Ġseaf ood +Ġfig ur +ĠR D +c rop +Ġsqu ads +{ \ +à ¹ +ĠE h +Ġinterview ing +ĠQ in +Ġas piring +PL IC +Ġcla uses +ĠG ast +ĠN ir +Ġl uggage +Ġh ose +Ġsystem d +Ġdesc ending +ĠRev ised +ĠR ails +al ign +70 9 +33 7 +Ġf ug +charg ing +t ags +Ġut er +k ish +WAR NING +49 0 +prof its +Ġvoy age +Ġa ce +ĠV anguard +ĠT anks +ĠM uk +Ġ2 26 +S afe +Ar mor +Ġvolcan ic +Ġwom b +ĠM IL +Ġbegin ner +ĠRec ogn +ĠA AP +PL AY +) ! 
+Ġdetect ing +c n +Ġbre aches +Bas ically +ĠP ag +ĠMunicip al +ĠInd ie +ĠL af +ĠDis able +ĠOl son +Ġrest rained +Ġrul ings +Ġhum ane +ev ents +ĠCinem a +display Text +ĠH atch +action Date +onna issance +Ġassault ing +ĠL ug +CH AT +Ġvig orous +ĠPer se +Ġintoler ance +ĠSnap chat +ĠSh arks +Ġd ummy +ĠDi agn +ĠGu itar +im eters +40 3 +RE G +A x +Ġsepar ates +ĠMah m +Ġt v +j ah +O OL +C irc +ĠWinds or +uss ian +Ġintu ition +Ġdis dain +ĠDon ovan +Ġ2 21 +E mb +Ġcondem ning +Ġgener osity +zz y +Ġpant ies +ĠPre vent +Action Code +AN A +34 2 +external ActionCode +Ġspec ifying +Ġcryst all +J ere +Ġru pt +ĠApp rentice +Ġprof iling +Ð º +St rike +Ġsid eline +Ġoblig ated +Ġocc ult +Ġbureaucr atic +ant ically +rupt ed +neg ative +ĠEthiop ia +ĠC ivic +Ġins iders +el igible +ĠTV s +ĠB AR +ĠT I +i ologist +ĠA IR +Ġsubstit uted +Ar ab +ĠS aul +ĠY og +p rem +Ġbuild ers +Ġstation ary +Ġdoubt ful +Ġvig orously +Ġthr illing +Ph ysical +ĠCare y +ĠHyd ra +geon ing +ĠS ly +y ton +Ġborrow ers +ĠPark inson +Ġ ë +ĠJama ica +Ġsat ir +Ġinsurg ents +ĠF irm +Ġis ot +ĠK arn +our ning +ak ens +doc s +l ittle +ĠMon aco +CL ASS +Tur key +L y +ĠCon an +ass ic +Ġstar red +ĠPac ers +et ies +Ġt ipping +M oon +ĠR w +s ame +Ġcav ity +Ġgo of +ĠZ o +Sh ock +um mer +Ġemphas izes +Ġreg rett +Ġnovel ty +Ġen vy +ĠPass ive +r w +50 5 +Ġind ifferent +ĠR ica +ĠHim self +ĠFred die +Ġad ip +ä¸ Ģ +Ġbreak out +Ġhur ried +ĠHu ang +ĠD isk +Ġro aming +?????- ?????- +U V +ĠRick y +ĠS igma +Ġmarginal ized +Ġed its +Ġ30 4 +mem ory +Ġspec imen +29 3 +ãģ ¯ +Ġvert ically +Ġaud ition +ĠHe ck +Ġc aster +ĠHold ings +ad al +ĠC ron +ĠL iam +Ġdef lect +P ick +ĠDeb ug +RE F +Ġvers atility +ot hes +class ified +ĠMah ar +ĠH ort +C ounter +st asy +not iced +33 1 +ĠSh im +f uck +ĠB ie +Ġair ing +ĠPro tein +ĠHold ing +Ġspect ators +ili ated +ĠThat cher +n osis +ãĥ¼ ãĥ³ +Te le +B oston +ĠTem pl +st ay +Ġdecl arations +47 9 +Vol ume +ĠDesign er +ĠOver watch +id ae +Ġon wards +Ġn ets +ĠMan ila +part icularly +Ġpolit ic +o other +Ġport raits +Ġpave ment +c ffff +Ġs aints +Ġbegin ners +ES PN +Ġshort comings +âķIJ âķIJ +Ġcom et +ĠOrgan ic +qu el +Ġhospital ized +Bre ak +Ġpe el +dyl ib +asp x +ur ances +ĠT IM +P g +Ġread able +ĠMal ik +Ġm uzzle +Ġbench marks +d al +ĠV acc +ĠH icks +60 9 +ĠB iblical +he ng +Ġover load +ĠCivil ization +Ġimm oral +Ġf ries +ãĤ Ĵ +Ġreprodu ced +Ġform ulation +j ug +ire z +g ear +Ġco ached +Mp Server +ĠS J +ĠK w +In it +d eal +ĠO ro +ĠL oki +ĠSong s +Ġ23 2 +ĠLou ise +asion ally +Ġunc ond +olly wood +Ġprogress ives +ĠEn ough +ĠDo e +Ġwreck age +Ġbr ushed +ĠBase Type +Ġz oning +ish able +het ically +ĠC aucus +ĠH ue +Ġk arma +ĠSport ing +Ġtrad er +Ġseem ing +ĠCapt ure +4 30 +b ish +Ġt unes +Ġindo ors +ĠSp here +ĠD ancing +TER N +Ġno b +ĠG ST +m aps +Ġpe ppers +F it +Ġoverse es +ĠRabb i +ĠR uler +vert ising +off ice +xx x +Ġra ft +Ch anged +Ġtext books +L inks +ĠO mn +ãĢ ij +Ġinconven ience +ĠDon etsk += ~ +Ġimplicit ly +Ġboost s +ĠB ones +ĠBo om +Cour tesy +Ġsens ational +AN Y +Ġgre edy +ed en +Ġinex per +ĠL er +ĠV ale +Ġtight en +ĠE AR +ĠN um +Ġancest or +S ent +ĠH orde +urg ical +all ah +Ġsa p +amb a +ĠSp read +tw itch +Ġgrand son +Ġfract ure +Ġmoder ator +ĠSe venth +ĠRe verse +Ġestim ation +Cho ose +Ġpar ach +Ġbar ric +ãĢ IJ +Ġcomp ass +Ġall ergic +âĢ ķ +OT HER +err illa +Ġw agon +Ġz inc +Ġrub bed +ĠFull er +ĠLuxem bourg +ĠHoo ver +Ġli ar +ĠEven ing +ĠCob b +est eem +Ġselect or +ĠB rawl +is ance +ĠE k +Ġtro op +Ġg uts +ĠApp eal +ĠTibet an +Ġrout ines +ĠM ent +Ġsummar ized +steam apps +Ġtr anqu +Ġ19 29 +or an +ĠAut hent +Ġg maxwell +Ġappre hens 
+Ġpo ems +Ġsa usage +ĠWeb ster +ur us +Ġthem ed +Ġl ounge +Ġcharg er +Sp oiler +Ġsp illed +h og +ĠSu nder +ĠA in +ĠAng ry +Ġdis qual +ĠFrequ ency +ĠEther net +Ġhel per +Per cent +Ġhorr ifying +Ġa il +ĠAll an +EE E +ĠCross ing +44 9 +Ġh olog +ĠPuzz les +ĠGo es +eren n +60 4 +ãģ ı +ĠRaf ael +Ġatt en +ĠE manuel +Ġup ro +ĠSus p +P sych +ĠTr ainer +ĠN ES +ĠHun ts +bec ue +Ġcounsel or +R ule +Ġtox ins +Ġb anners +r ifice +Ġgreet ing +Ġfren zy +Ġall ocate +Ġ* ) +ex pr +50 3 +ĠCh ick +ĠT orn +Ġconsolid ation +ĠF letcher +sw itch +fr ac +cl ips +ĠMcK in +ĠLun ar +Mon th +IT CH +Ġscholar ly +rap ed +39 8 +Ġ19 10 +Ġe greg +Ġin secure +Ġvict orious +cffff cc +Ġsing led +Ġel ves +ĠW ond +bur st +Ġcam oufl +ĠBL ACK +Ġcondition ed +ç ī +ans wered +Ġcompuls ory +asc ist +Ġpodcast s +ĠFrank furt +bn b +Ġne oliberal +ĠKey board +ĠBel le +w arm +Ġtrust s +Ġins ured +ĠBu cc +us able +60 7 +ĠPl ains +Ġ18 90 +Ġsabot age +Ġlod ged +f elt +Ġg a +ĠN arc +ĠSal em +Ġsevent y +ĠBl ank +p ocket +Ġwhis per +Ġm ating +om ics +ĠSal man +ĠK ad +Ġan gered +Ġcoll isions +Ġextraord inarily +Ġcoerc ion +G host +b irds +è Ģ +k ok +Ġper missible +avor able +Ġpo inters +Ġdiss ip +ac i +Ġtheat rical +ĠCos mic +Ġforget ting +Ġfinal ized +å¤ § +y out +l ibrary +Ġbo oming +ĠBel ieve +ĠTe acher +ĠL iv +ĠGOOD MAN +ĠDomin ican +OR ED +ĠPart ies +Ġprecip itation +ĠSl ot +R oy +ĠComb ined +Ġinteg rating +Ġch rome +Ġintest inal +ĠRe bell +Ġmatch ups +Ġblock buster +ĠLore n +ĠLe vy +Ġpre aching +ĠS ending +ĠPur pose +ra x +f if +Ġauthor itative +ĠP ET +ast ical +Ġdish on +Ġchat ting +Ġ"$ :/ +Connect ion +Ġrecre ate +Ġdel inqu +Ġbro th +ĠD irty +ĠAd min +z man +Ġscholars hips +Ġ25 3 +cont act +als a +7 67 +c reen +abb age +Ġ19 15 +Ġbl ended +Ġal armed +L anguage +35 6 +Ġbl ends +ĠCh anged +W olf +Ġhe pat +Creat ing +Ġper secut +Ġsweet ness +art e +Ġforfe iture +ĠRober to +im pro +N FL +ĠMag net +Det ailed +Ġinsign ificant +ĠPOL IT +ĠBB Q +ĠC PS +Ġse aw +amin er +m L +end if +f inals +Ġ26 5 +u ish +Ġ} ) +ĠPro blems +Ġem blem +Ġserious ness +Ġpars ing +Ġsubst itution +Ġpress ured +Ġrecy cled +ale b +Rub y +Ġprof iciency +Dri ver +ĠW ester +: ' +AF TA +Ġm antle +ĠClay ton +fl ag +Ġpractition er +c overed +ĠSt ruct +add afi +4 25 +ĠTown ship +ĠHyd ro +Lou is +34 3 +Ġcond o +ĠT ao +Ġutil ization +Ġnause a +ĠDem s +rid ges +p ause +Ġform ulas +Ġchall enger +37 6 +Ġdefect ive +ĠRail way +ĠPub Med +Ġyog urt +l bs +ĠNor folk +OP E +ĠMood y +Ġdistribut or +Ġscroll s +Ġextract s +St an +Ġv iability +Ġexp oses +Ġstar vation +ĠStep s +ĠD odd +f ew +ST D +33 2 +Ġclos ures +Ġcomplement ary +ĠS asha +ump y +Ġmon et +Ġartic ulate +ĠDo ct +k iller +Ġsc rim +Ġ2 64 +Ġprost itutes +Ġse vered +Ġattach ments +Ġcool ed +L ev +ĠF alk +f ail +Ġpolic eman +ĠD ag +Ġpray ed +ĠK ernel +Ġcl ut +Ġc ath +Ġan omaly +St orm +em aker +ĠBreak fast +ul i +o ire +J J +h z +Oper ation +ĠS ick +35 4 +ĠGuatem ala +R ate +Ġexp osures +f aces +ĠArch ae +ra f +ĠM ia +Ġ20 25 +Ġop aque +Ġdisgu ised +ĠHead quarters +S ah +Ġp ots +9 78 +ĠM alf +Ġfrown ed +Ġpoison ous +ĠCon vers +ee ks +Ġcr ab +." 
" +Ġtre ason +Ġr anc +Ġescal ating +Ġwar r +Ġmob s +Ġl amps +ĠSun shine +ĠBrun swick +Ph ones +Ġspe lled +ĠSk ip +Ġ20 50 +Ġ19 11 +ĠPl uto +ĠAm end +Ġme ats +38 7 +Ġst omp +ĠZh ou +ĠLevi athan +ĠHaz ard +ad v +ĠOr well +Ġal oud +Ġb umper +ĠAn arch +ub untu +ĠSer ious +f itting +ĠOption al +ĠCec il +RE AM +Ġser otonin +Ġcultiv ate +ag ogue +} \ +Ġmos ques +ĠSun ny +Ġre active +rev olution +ĠL up +ĠFed ora +Ġdefense man +ĠV ID +ist ine +Ġdrown ing +ĠBroad casting +Ġthr iller +ĠS cy +Ġacceler ating +Ġdirect s +od ied +b ike +d uration +Ġpain fully +R edd +Ġproduct ions +Ġg ag +Ġwh ist +Ġs ock +Ġinf initely +ĠConc ern +ĠCit adel +Ġlie u +Ġcand les +ogene ous +arg er +Ġheaven ly +inflamm atory +Per formance +C s +ruct ose +az aki +Ġp essim +Ġinf erence +Ġpow d +ĠZ oe +Ġpain ts +Ġd azz +pt a +-------- --- +Ġins pir +ĠExper imental +ĠKn ife +reg or +b ors +Ġshow ers +rom eda +Ġs aint +Ġben ign +ĠJ iang +Ġenvision ed +Ġsh roud +IF T +H O +Ġsh uff +ĠI CC +Ġse greg +Ġrevis it +ighth ouse +L i +Ġsub strate +ĠSe as +ĠRew ard +ĠH ep +ĠBr ass +s bm +Ġelim inates +Ġst amina +ĠV AT +ĠLo an +Ġconst raint +Ġappropri ated +Ġp es +ĠA LE +r anging +Ġ40 4 +39 2 +Ġintellectual s +ach u +Ġrestruct uring +ĠLe vin +Ġrun es +Ġdelight ful +Ġcarbohyd rates +ĠMod els +ĠExp o +Ġtransport ing +all oc +Ġring ing +S amsung +Ġscarce ly +ĠURL s +ĠM AS +Ġprot otypes +Ġnarr ator +ĠCPU s +cd n +ĠBart on +Ġdecided ly +ĠSh u +ix ir +oc ious +ĠMy st +N intendo +Ġre use +Ġforg iven +F ew +in ical +n at +Ġseam less +ĠEv a +ĠE VE +ĠJ O +land ers +Ġso fter +neg ie +Ġtrans ient +Ġorb ital +Ġfulf il +ĠK om +Hop efully +Ġdynam ically +ĠHun ger +å Ľ +ĠArmen ia +el man +ber to +Ġp ige +ĠID s +lim it +Ġve ins +Ġso aring +p acks +Gold en +ĠCr ab +ist or +ĠR PM +Ġ$ $ +g ression +Ġjihad ist +Ġgam ble +Ġcare g +Ġinf lated +F ace +ĠFire arms +ĠEm manuel +â Ŀ +Ġsh ocks +gr ab +Ġspl end +ĠHP V +ab ortion +Ab ove +Ent ity +play ers +Ġcomm enced +ul ence +Ġfulfill ment +Ġembod iments +ĠW elfare +Ġha il +Ġ< @ +tt en +Ġcat cher +ĠJ azeera +Ġvolcan o +Ġstabil ize +ĠHand ler +Ġintens ified +ĠAb rams +Ġhum iliation +p aced +60 5 +ĠCent OS +Spe cific +Ġhe ed +ĠC AM +ĠGal ile +D ie +Ġabol ished +ĠThom son +ĠTe achers +ĠW ass +j ong +ĠIS BN +ĠAll ies +sh ake +å · +v ict +How ard +Ġde em +Ġexceed ingly +ĠSmart stocks +ib e +Ġdoor way +Ġcompet ed +ig mat +Ġnational ists +Ġg room +ĠKe en +Ġdispos able +de cl +ĠT olkien +ĠSche me +Ġb iod +Ġav id +ĠEl on +ag ar +ĠT SA +R oman +Ġartific ially +Ġadvis ors +X L +ĠInf erno +36 6 +Ġted ious +ĠPhot ography +ĠCar rie +Ġtro pe +ĠSand ra +Ġdec imal +Que en +ĠGund am +ĠO M +ote ch +N BA +Ġ19 32 +Ġent renched +ĠMar ion +Ġfr aternity +Lab our +Hen ry +Ġlat itude +E ither +Ġenh ances +ĠPot ential +Ġsh ines +id ad +Ġbread th +Ġcapac ities +ĠðŁ ĻĤ +ĠBron x +Ġsex es +Ġdifferent iation +Ġheavy weight +ĠT aj +d ra +Ġmigr ate +Ġexhaust ion +ĠR UN +els ius +ĠCu omo +Ġgu itars +Ġcl ones +ĠSom ew +ĠP ry +------------ - +Ġwarr anted +cy cles +Ġsalv age +Ġdis ks +R ANT +ĠNGO s +ĠMart ian +":[ {" +Ġadd icts +oj ure +il let +Ġamazing ly +art ments +p ixel +ĠGPU s +Lay out +è £ +ĠTam il +ĠBas il +Ġimpart ial +ĠSt ructure +f ork +b ryce +Ġr idge +ĠHamb urg +ri ous +Ġbl itz +cig arettes +Ġcan ned +40 2 +Ġiron ically +Ġcompassion ate +ĠHaw kins +. 
[Tokenizer data omitted: the remainder of this patch consisted of the raw contents of a GPT-2-style BPE merges file — thousands of byte-pair merge entries such as "ĠCat hedral" and "Ġprosper ous" — bundled with the repository. The entries are vocabulary data with no prose or code content and are not reproduced here.]
vern +Ġk ettle +Ġgly c +ĠMand ela +ĠF ully +å§ « +FIN EST +Ġsquee zing +ĠRy der +amp oo +oreAnd Online +Inst oreAndOnline +Buyable InstoreAndOnline +Ġcommem orate +ĠRamp age +Aust in +ĠSh roud +ĠRu ins +9 15 +ĠK H +Ġwater front +ĠE SC +b aby +ĠC out +ĠEm blem +Ġequival ents +49 2 +Un ique +ĠNiet zsche +brow ser +Ġim itation +ĠWere wolf +ĠKir in +ac as +' ," +Ġà ¾ +Review ed +Ġc unt +Ġvo ic +ĠLen ovo +Ġbond ed +48 1 +Ġinhib itors +Ġendeav ors +ĠHav ana +ĠSt out +ĠJ olly +A ctor +*/ ( +Ġoccur rences +ĠT ens +Incre ased +ĠACT ION +Ġ ãĢĮ +ĠRank ings +ĠB reat +Ġ30 9 +D ou +Ġimpact ing +ĠDuc hess +pre fix +Q B +Ġsummon ing +Ġbest owed +ĠKe pler +ĠPOW ER +c ube +ĠK its +ĠG rip +Ġop ium +Ġrep utable +t oc +ich ael +ĠR ipple +Ġcaf é +ĠZ oom +ĠBur ma +Ġwa ive +Ġst alls +Ġdem eanor +inc erity +Ġfluor ide +ĠSH OULD +Par is +Ġlong ing +Ġpl at +Ġgross ly +Ġbull s +Ġshowc asing +ex pected +ĠG addafi +engine ering +Re peat +ĠK ut +Ġconce ivable +Ġtrim med +osc ope +ĠCand idate +ĠT ears +rol og +Lew is +S UP +Ġroad map +Ġsal iva +Ġtrump et +Jim my +Ġmirac ulous +Ġcolon ization +Ġam put +ĠGN OME +ate ch +D ifferent +ĠE LE +ĠGovern ments +ĠA head +ãħĭ ãħĭ +word press +L IB +ĠIn clude +ĠDor othy +0 45 +ĠColomb ian +Ġle ased +88 4 +Ġde grading +ĠDa isy +i ations +Ġbapt ized +Ġsurn ame +co x +Ġblink ed +ãĥ ¢ +Ġpoll en +Ġder mat +Ġre gex +ĠNich olson +ĠE ater +ç ľ +rad or +Ġnarrow er +Ġhur ricanes +Ġhalluc inations +r idden +ISS ION +ĠFire fly +Ġattain ment +Ġnom inate +Ġav ocado +ĠM eredith +Ġt s +Ġreve rence +Ġe uph +Ġcr ates +ĠT EXT +Ġ4 43 +Ġ3 19 +J SON +iqu ette +Ġshort stop +ic key +Ġpro pelled +Ġap i +ĠTh ieves +77 9 +Ġovers aw +Ġcol i +ĠNic ola +Ġover cl +ik awa +ĠC yr +Ġ38 4 +78 9 +ĠAll ows +10 27 +Det roit +TR Y +set up +ĠSocial ism +Sov iet +s usp +ĠAP R +ĠShut down +Ġal uminium +zb ek +ĠL over +GGGG GGGG +Ġdemocr acies +Ġ19 08 +ĠMer rill +ĠFranco is +gd ala +Ġtraff ickers +ĠT il +ĠGo at +Ġsp ed +ĠRes erv +Ġpro d +55 2 +Ġc ac +ĠUn iv +ĠSch we +Ġsw irling +ĠWild erness +ĠEgg s +Ġsadd ened +Ġarch aic +H yd +Ġexcess ively +B RE +Ġaer ospace +ĠVo ices +Cra ig +Ġign ited +In itially +ĠMc A +Ġhand set +Ġreform ing +Ġfrust rations +ĠDead pool +ĠBel ichick +ract or +ĠRagnar ok +ĠD rupal +ĠApp roximately +19 20 +ĠHub ble +arm or +ĠSar as +ĠJon as +Ġnostalg ic +Ġfeas ibility +Sah aran +Ġorb iting +Ġ9 70 +R u +Ġsh in +ĠInvestig ators +Ġinconsist encies +ĠP AN +B G +Ġgraz ing +Ġdetect ors +ĠStart up +ĠFun ny +ĠNa omi +Consider ing +Ġh og +ut f +ce mic +Ġfort ified +ĠFun ctions +Ġcod ec +nut rition +H at +" ! 
+micro soft +55 8 +ĠTh in +ĠA CE +Al ias +ĠO PS +p apers +P K +ãĢ İ +Ġimpro bable +N orthern +equ al +Ġlook out +Ġty res +ĠMod ified +ĠK op +Abs olutely +Ġbuild up +sil ver +Ġaud i +Ġgro tesque +ĠSab er +ĠPres byter +ON Y +Ġglac iers +ĠSho als +ĠK ass +ĠH RC +ĠNic ol +ĠL unch +ĠF oss +âĸ Ĵ +AD RA +ĠOne Plus +o ing +ground s +Ġincident al +Ġdatas ets +68 9 +ĠClarks on +Ġassemb ling +ĠCorrect ions +Ġdrink ers +Ġqual ifiers +Ġle ash +Ġunf ounded +ĠH undred +Ġkick off +T i +Ġrecon cil +ĠGr ants +ĠCompl iance +ĠDexter ity +Ġ19 06 +w arn +D allas +Max imum +n ard +av ia +be aut +ens itivity +tr ace +Ġpione ers +ĠF ract +ãĢ ı +Ġpre cept +Ġgloss y +ĠI EEE +Ac ross +Ġ6 80 +S leep +che on +Ġsatir ical +ĠMin otaur +ĠCla ude +Ġr é +ape go +Ġcar rot +ĠSem in +ino a +Ġz o +Ind ependent +Ġdiagn oses +ĠC ue +M AR +Ġrend ition +ĠK ik +Ġpath ology +Ġselect s +Link edIn +Ġass ay +ĠD res +Ġtext ual +post ed +IT AL +ĠM aul +N eal +Ġinter connected +Ġerr atic +ĠVir us +Ġ5 30 +Ġenvironmental ists +ĠP helps +Ġeng agements +ĠIN ST +Ġeconom ical +nox ious +Ġg earing +izz y +Ġfavor ably +ĠMcG ill +T erm +Ġh anged +Ġball park +ĠRe yes +Ġbe ware +ĠP sal +ĠMass acre +q i +Ġin accessible +acly sm +Ġfr ay +ill ac +Ġbitter ly +ĠCert ification +Mich igan +Ġir respective +al ore +Em pty +Ġendorse ments +Ġund et +f g +equ ipped +Ġmerc iless +ĠC ust +Ġimm ature +Ġvou cher +ĠBlack well +Ñ ı +h awk +dis ciplinary +ile e +ĠMak oto +ĠD ude +ãĥĩ ãĤ£ +Y ears +Ġin ver +Ġsh aman +ĠY ong +ip el +ell en +ĠCath y +br ids +Ġs arc +65 1 +N ear +Ġground work +Ġam az +Ġ4 15 +ĠHunting ton +hew s +ĠB ung +Ġarbit rarily +ĠW it +ĠAl berto +Ġdis qualified +best os +46 1 +Ġp c +Ġ28 4 +ro bat +Rob in +Ġh ugs +ĠTrans ition +ĠOcc asionally +Ġ3 26 +ĠWh ilst +ĠLe y +Ġspaces hip +cs v +Ġun successfully +ĠA u +le ck +ĠWing ed +ĠGrizz lies +. � +Ġne arer +ĠSorce ress +ĠInd igo +El se +8 40 +let es +Co ach +Ġup bringing +ĠK es +Ġseparat ist +Ġrac ists +Ġch ained +Ġabst inence +lear ning +Ġrein stated +Ġsymm etry +Ġremind ers +ĠChe vy +Ġm ont +Ġexempl ary +ĠT OR +Z X +Ġqual itative +ĠSt amp +ĠSav annah +ĠRoss i +Ġp aed +Ġdispens aries +ĠWall s +ĠCh ronic +Ġcompliment ary +ĠBeir ut +Ġ+ --- +igs list +Ġcrypt ographic +mas ters +ĠCap itals +Ġmax imal +Ġent ropy +Point s +Ġcombat ants +l ip +ĠGl ob +ĠB MC +ph ase +th ank +HT TP +Ġcomm uter +Ġ\( \ +.. / +ĠReg ener +ĠDO I +ĠActiv ision +Ġsl it +os al +RE M +Ġch ants +Y u +Ke ys +Bre xit +ĠFor ced +Ari zona +Ġsquad ron +IS O +ĠMal one +Ġ3 38 +Ġcontrast ing +Ġt idal +Ġlib el +Ġimpl anted +Ġupro ar +ĠC ater +Ġpropos itions +M anchester +ĠEuro s +it amin +G il +ĠEl ven +ĠSe ek +ĠB ai +Ġredevelop ment +ĠTown s +ĠL ub +! ", +al on +K rist +Ġmeas urable +Ġimagin able +Ġapost les +Y N +7 60 +Ġster oid +Ġspecific ity +ĠL ocated +ĠBeck er +ĠE du +ĠDiet ary +uts ch +ĠMar ilyn +Ġbl ister +ĠM EP +ĠK oz +ĠC MS +y ahoo +ĠCar ney +Ġbo asting +ĠC aleb +By te +read s +ad en +Pro blem +ĠWood ward +S we +S up +ĠK GB +Set up +Ġtac it +Ġret ribution +Ġd ues +ĠM ü +. ? 
+ä¸ Ń +p ots +Ġcame o +ĠP AL +educ ation +A my +like ly +g ling +Ġconstitution ally +ĠHam m +ĠSpe ak +Ġwid gets +br ate +Ġcra ppy +ĠI ter +Ġanticip ating +ĠB out +P ixel +ĠY ep +ĠLaur ie +Ġh ut +Ġbullet in +ĠSal vation +Ġch ats +ear able +Honest ly +AL TH +onse qu +c ult +isco very +ovy ch +Ġse lves +ĠSat oshi +S ounds +Ġconver gence +ĠRosen berg +19 74 +Ġnas al +Ġfull est +Ġfer ocious +x us +ist e +AM S +Ġlobb ied +Ġso othing +ĠGun n +t oday +0 24 +Ġinspir ational +ĠN BN +p b +g ewater +or ah +all owed +ĠCol iseum +Ġspecial izing +Ġinsane ly +ĠT ape +del ay +Ġt arn +ĠP ound +Ġmel anch +Ġdeploy ments +il and +Ġless en +Ġfur ry +ĠUE FA +Ġblood shed +ĠMe ier +ither ing +Ġhe irs +ĠJ aw +ax ter +ĠPublic ations +Ġal ters +int ention +ĠWinc hester +d etermination +ĠLif etime +th in +Mon ster +7 80 +Ġapprox imation +Ġsuper markets +ĠSecond s +or os +h uge +Ġb ribe +ĠLIM ITED +un ed +Ġmis interpret +ĠIn jury +Ġ3 67 +Ġthreshold s +ĠCarn ival +Ġgastro intestinal +Ġguid eline +Ġde ceived +f eatures +Ġpurported ly +ĠRon nie +ĠNew t +Ġsp acious +as us +Ġsuperhero es +ĠCyn thia +le gged +k amp +ch io +Ġth umbnail +ĠShir ley +ill ation +Ġshe ds +ĠZ y +E PA +Ġdam s +Ġy awn +n ah +ĠPe ggy +ĠE rie +ĠJu ventus +ĠF ountain +r x +don ald +al bum +ĠComp rehensive +Ġc aching +ĠU z +ulner ability +ĠPrinc iple +ĠJ ian +ing ers +cast s +ĠOs iris +ch art +t ile +ĠTiff any +ĠPatt on +ĠWh ip +Ġovers ized +J e +ĠCind erella +ĠB orders +ĠDa esh +M ah +Ġdog ma +Ġcommun ists +v u +Coun cil +Ġfresh water +Ġw ounding +Ġdeb acle +Ġyoung ster +Ġthread ed +ĠB ots +ĠSav ings +ãģ Ĥ +ol ing +oh o +Ġillum ination +M RI +Ġlo osen +tr ump +ag ency +ur ion +Ġmoment arily +ĠCh un +ĠBud apest +ĠAl ley +D isk +Ġaston ished +ĠCon quer +ĠAccount ing +h aving +ĠWe in +ĠAl right +Ġrev olver +Ġdel usion +Ġrelic s +Ġad herent +qu ant +Ġhand made +or io +Ġcomb ating +c oded +Ġquad ru +re th +N ik +ĠTrib al +ĠMyster ious +Ġin hal +ĠWin ning +ĠClass ification +ch anged +Ġun ab +Ġsc orn +icip ated +w l +ond uctor +Ġrein forcing +ĠChild hood +an ova +Ġadventure r +Ġdoctor al +ĠStrateg ies +Ġengulf ed +ĠEnc ounter +Ġl ashes +Crit ical +ric ular +ĠU TF +oci ation +check ing +ĠConsult ing +Run time +per iod +ĠAs gard +Ġdist illed +ĠPas adena +ĠD ying +ĠCOUN TY +Ġgran ite +Ġsm ack +Ġparach ute +ĠS UR +Virgin ia +ĠF urious +78 7 +ĠO kin +Ġcam el +ĠM bps +19 72 +ĠCh ao +ĠC yan +j oice +ef er +ĠW rap +ĠDeb ate +S eg +Ġfore arm +ĠIgn ore +Ġtim estamp +Ġprob ing +ĠNo on +ĠGra il +f en +Ġdorm ant +ĠFirst ly +ĠE ighth +ĠH UN +ĠDes ire +or as +Girl s +ĠDes mond +z ar +am ines +O AD +exec ute +Ġbo obs +ĠAT L +_ ( +Chel sea +Ġmasturb ation +ĠCo C +Ġdestroy er +ĠCh omsky +Ġsc atter +ĠAss ets +79 6 +ĠC argo +Ġrecept ive +ĠSc ope +Ġmarket ers +Ġlaun chers +Ġax le +ĠSE A +se q +ĠM off +f inding +ĠGib bs +Georg ia +extreme ly +N J +Ġlab orers +st als +Ġmed iation +ĠH edge +at own +Ġi od +des pite +v ill +J ane +ex istence +Ġcoinc ided +ĠUt ilities +ĠChe ap +Ġlog istical +Ġcul mination +ĠNic otine +p ak +F older +Ġrod ents +st uff +Ġlaw fully +Ġreper to +io ch +j j +Dial ogue +HH HH +lic tion +Look s +Ġ29 7 +Ġtur rets +ĠAb andon +Ġinc ess +ĠTraff ord +Ġcur led +Ġprefer ring +Ġprivat ization +Ġir resist +ĠP anda +ĠSh ake +ĠMc Gr +ãĥ Ħ +und ers +Ġdiscrim inated +Ġbart ender +I LE +Atl antic +Ġprop ensity +ĠW iz +ĠG im +con ference +Ġrein forces +G h +w agon +Ġe erie +F al +Ġhug ged +rac ist +R IC +F u +Ġf iller +ĠSt ub +Ġeng raved +ĠWrest le +Ġimagin ative +ĠPe er +ĠFact ors +an us +ĠDrac ula +mon itor +Ġrou ters +ib ia +ĠBoo lean +end ale +ĠSl aughter +ĠSh ack +R FC 
+ĠSpiel berg +S ax +ĠPH OTO +ĠCl over +ĠR ae +Dep ending +ĠMem or +ar am +Ġpier ced +Ġcur tains +v ale +ĠInqu isition +ĠP oke +Ġforecast ing +Ġcompl ains +S ense +ĠHer mes +isc overed +Ġb ible +ĠMor ph +Ġg erm +78 5 +D ON +Ġcon gen +Ġcr ane +ĠD PR +Ġrespect fully +R oom +ĠN aw +ĠDal ai +re ason +ĠAng us +Educ ation +ĠTitan ic +Ë ľ +Ġo val +un ited +Ġthird s +Ġmoist ur +ĠC PC +M iami +Ġtent acles +ĠPol aris +ex c +ex clusive +ĠPra irie +Ġcol ossal +ĠBl end +sur prisingly +ÃŃ s +Ġindo ctr +Ġbas al +ĠMP EG +und o +Spl it +Develop ment +Ġlan tern +19 71 +Ġprov ocation +Ġang uish +ĠB ind +ĠLe ia +duc ers +ipp y +conserv ancy +Ġinitial ize +ĠTw ice +ĠSu k +Ġpred ic +Ġdi ploma +Ġsoc iop +Ing redients +Ġhamm ered +ĠIr ma +Q aida +Ġglim ps +ĠB ian +Ġst acking +Ġf end +gov track +Ġun n +dem ocratic +ig ree +Ġ5 80 +Ġ29 4 +Ġstraw berry +ID ER +Ġcher ished +ĠH ots +Ġinfer red +Ġ8 08 +ĠS ocrates +O regon +ĠR oses +ĠFO IA +Ġins ensitive +Ġ40 8 +Recomm end +ĠSh ine +Ġpain staking +UG E +ĠHell er +ĠEnter prises +I OR +ad j +N RS +L G +Ġalien ated +Ġacknowled gement +ĠA UD +ĠRen eg +Ġvou chers +Ġ9 60 +Ġm oot +ĠDim ensions +Ġc abbage +B right +g at +ĠK lu +Ġlat ent +Ġz e +ĠM eng +Ġdis perse +Ġpand emonium +H Q +Ġvirt uous +ĠLoc ations +ee per +prov ided +Ġse ams +ĠW T +iz o +PR OV +Ġtit anium +Ġrecol lection +Ġcr an +Ġ7 80 +ĠN F +49 1 +64 2 +p acking +59 8 +text ure +Sp ider +fre edom +cipl ed +ĠTAM ADRA +âĻ ¦ +aut hent +ĠW ANT +r ified +Ġr ites +Ġuter us +k iss +Ġâī ¤ +Ġsk illet +Ġdis enfranch +ĠGa al +Comp an +Ġage ing +gu ide +B alt +Ġiter ator +Ġdiscretion ary +t ips +Ġprim ates +ĠTechn ique +ĠPay ments +az el +ĠR OCK +stant ial +0 60 +Ġd mg +ĠJack ets +ĠPlay off +Ġnurs ery +ĠSy mb +art on +Ġannex ation +Color ado +Ġco ils +ĠSh oes +âĦ¢ : +ĠRo z +COM PLE +ĠEve rest +ĠTri umph +J oy +G rid +à ¼ +process or +ĠPros per +ĠSever us +ĠSelect ed +r g +ĠTay yip +St ra +Ġski ing +Ġ? ) +Ġpe g +Tes la +Ġtime frame +Ġmaster mind +ĠN B +scient ific +ĠSh it +gener ic +IN TER +N UM +Ġst roll +ĠEn ix +ĠM MR +ĠE MS +m ovie +Ĥ ª +Ġminim izing +idd ling +Ġilleg itimate +Ġprot otyp +Ġpremature ly +Ġmanual s +obb ies +ĠCass idy +D EC +des ktop +Ġaer os +Ġscreen ings +Ġdeb ilitating +ĠGr ind +nature conservancy +Ġf ades +ter mination +assets adobe +F actor +Ġdefinitive ly +P oké +ap ult +ĠLaf ayette +C orn +ĠCor al +Ġstagn ant +T ue +Ġdissatisf action +G ender +Ġkid neys +ĠG ow +ĠDef eat +ĠAsh ton +Ġcart els +Ġfore closure +ĠExpl ore +stre ngth +ot in +Ġveterin arian +Ġf umble +Ġpar ap +ĠSt rait +r ils +Ġpr ick +ĠBerm uda +ĠAm munition +skin ned +Ġab ound +ĠB raz +Ġshar per +ĠAsc ension +Ġ9 78 +Ġpreview s +Ġcommun ion +ĠX Y +Ġph ony +Ġnewcom er +Ġ3 32 +." 
," +Ġredist ribution +Prot ect +ĠSo f +K al +Ġlip stick +w orst +Ġtang led +Ġretrospect ive +int eger +Ġvolunte ering +Ġ19 07 +Ġ -------------------- +ic hen +Ġunve iling +Ġsen seless +Ġfisher ies +\ - +Ġh inges +Ġcalcul us +My th +Ġund efeated +Ġoptim izations +Ġdep ress +Ġbill board +ĠY ad +ĠPy ramid +Is n +I de +Ġleg ion +ĠK ramer +ent anyl +Ġpenet rating +ĠHaw th +ĠPR ODUCT +ĠGer ard +ĠP act +ĠIn cluding +ĠEl ias +ĠEl aine +vis ual +Ġhum ming +Ġcond esc +ĠF asc +ä¸ Ĭ +Ġe galitarian +Ġdev s +ĠD ahl +O ps +D H +ĠB ounce +id ated +ald o +Ġrepublic an +Ġh amb +ĠS ett +ograph ies +CH APTER +Ġtrans sexual +Ġsky rocket +ans wer +Ġmark up +Ø ª +Ġhero ine +Comp are +ĠT av +Be ast +Ġsuccess ors +Ġna ïve +ĠBuck ley +st ress +me at +Ġdownload able +Ġindex ed +Ġsc aff +ĠL ump +ĠHom o +Stud io +In sp +Ġr acked +far ious +ĠPet ty +Ex ternal +Ġ19 09 +W ars +com mit +put ers +Ġun ob +ĠEr r +ĠE G +ĠAl am +ĠSiber ia +ĠAtmosp heric +IS TER +ĠSatan ic +trans lation +ĠL oud +tra umatic +l ique +Ġreson ate +ĠWel ch +Ġspark ing +ĠT OM +t one +Ġout l +Ġhandc uffed +ĠSer ie +8 01 +Ġland marks +ĠRee ves +Ġsoft ened +Ġdazz ling +ĠW anted +month s +Mag ikarp +Ġunt reated +ĠBed ford +M i +ĠDynam o +O re +79 5 +Ġwrong ful +Ġl ured +Ġcort isol +Ġve x +d rawn +ile t +Download ha +ĠF action +Ġlab yrinth +Ġhij acked +w aters +er ick +Ġsuper iors +ĠRow ling +ĠGu inness +Ġt d +99 2 +Ġune arthed +Ġcentr if +Ġsham eless +P od +ĠF ib +Ġ icing +Ġpredict or +Ġ29 2 +fore station +con struct +C and +@ # +Ġag itated +Ġre pr +OV A +Ġkn itting +ĠLim a +Ġf odder +68 4 +ĠPerson a +k l +7 01 +Ġbreak up +á ¸ +Ġapp alled +Ġantidepress ants +ĠSus sex +Har ris +ĠTher mal +ee ee +U pload +Ġg ulf +Ġdoor step +ĠSh ank +L U +ĠM EN +ĠP ond +s orry +Ġmis fortune +n ance +Ġb ona +M ut +Ġde graded +ĠL OG +ĠN ess +an imal +Ġa version +und own +Ġsupplement ed +ĠC ups +Ġ50 4 +Ġdep rive +ĠSpark le +Å Ĥ +ĠMed itation +auth ors +ĠSab an +ĠN aked +air d +ĠMand arin +ĠScript ures +ĠPerson nel +ĠMahar ashtra +Ġ19 03 +ĠP ai +ĠMir age +omb at +Access ory +Ġfrag mented +T ogether +Ġbelie vable +ĠGl adiator +al igned +ĠSl ug +M AT +Ġconvert ible +ĠBour bon +amer on +ĠRe hab +nt ax +Ġpowd ered +pill ar +Ġsm oker +ĠMans on +ĠB F +5 11 +ĠGood ell +ĠD AR +m ud +g art +Ġob edient +ĠTrans mission +ĠDon ation +8 80 +Ġbother ing +Material s +ãĤ ± +dest roy +Ġfore going +Ġanarch ism +ĠK ry +ice ps +Ġl ittered +ĠSch iff +Ġanecd otal +un its +Ġf ian +ĠSt im +ĠS OME +ĠInv aders +Ġbehaviour al +ĠVent ures +Ġsub lime +Ġfru ition +ĠPen alty +Ġcorros ion +¶ ħ +Ġlik ened +Ġbesie ged +ween ey +ĠCre ep +Ġlinem en +mult i +ic ably +ud der +Ġvital ity +Ġshort fall +ĠP ants +ap ist +H idden +ĠDro ps +med ical +Ġpron unciation +ĠN RL +Ġinsight ful +J V +ĠBe ard +ĠCh ou +Ġchar ms +Ġb ins +Ġamb assadors +ĠS aturdays +Ġinhib itor +ĠFr anch +6 01 +', ' +ĠCon or +art ney +ĠX peria +g rave +be es +ĠProtest ants +Ġso aking +ĠM andal +Ġph ased +Ġ6 60 +Ġsc ams +Ġbuzz ing +ĠItal ians +ĠLoren zo +ĠJ A +Ġhes itated +Ġcl iffs +ĠG OT +ingu ishable +Ġk o +Ġinter ruption +Z ip +Lear ning +Ġundersc ores +ĠBl ink +K u +57 9 +ĠAut ob +I RE +Ġwater ing +Ġpast ry +8 20 +Ġvision ary +ĠTempl ar +awa ited +Ġpist on +Ġant id +current ly +Ġp ard +Ġw aging +Ġnob ility +ĠY us +Ġinject ing +f aith +ĠP ASS +å º +Ġret ake +ĠPR OC +Ġcat hedral +b ash +Ġwrest lers +Ġpartner ing +Ġn oses +Ġ3 58 +Trans form +am en +Ġb outs +ĠId eal +ĠConstant in +Ġse p +ĠMon arch +att en +ĠPe oples +mod ified +Ġmor atorium +Ġpen chant +Ġoffensive ly +Ġprox ies +ok ane +ĠTaiwan ese +ĠP oo +ĠH OME +us ional +Ġver bs +ĠO man +vis ory 
+Ġpersu asion +Ġmult it +Ġsc issors +G ay +ow ay +oph ysical +l us +gn u +Ġap ocalyptic +Ġabsurd ity +Ġplay book +Ġautobi ography +I UM +Ġsne aking +ĠSim ulation +pp s +ell ery +Plan et +Ġright fully +Ġn iece +ĠN EC +ĠIP O +ĠDis closure +lean or +ous y +ST ER +Ġ28 2 +Cru z +Ch all +64 3 +ĠSurv ive +ĠF atal +ĠAm id +ap o +We apons +D EN +7 70 +ĠGreen wald +Ġlin en +al os +Ġpollut ants +ĠPCI e +k at +Ġp aw +ĠK raft +C hem +ĠTermin ator +Ġre incarn +Ġ] [ +ĠSe eds +Ġsilhou ette +ĠSt ores +Ġgro oming +ĠD irection +ĠIs abel +ĠBr idges +ðŁ ij +E ED +ĠM orsi +Ġval ves +ĠRank ed +ĠPh arma +ĠOrgan izations +Ġpenet rated +ĠRod ham +ĠProt oss +Ġove rest +Ġex asper +ĠT J +Ġ 000000 +Ġtrick le +Ġbour bon +WH O +Ġw retched +Ġmicrosc opic +Ġcheck list +Ġad orned +R oyal +Ad minist +ĠRet irement +ĠHig hest +We ather +ile ge +Ġincre ments +ĠC osponsors +Ġmas se +ĠS inn +r f +Ġh ordes +as sembly +75 4 +ĠNat asha +ĠTY PE +ĠGEN ERAL +Ġarr anging +Ġ40 7 +l ator +Ġg lean +Ġdisc redited +Ġclin icians +UN E +Ġachie ves +ĠEm erson +com plex += [ +Ġprincip ally +Ġfra il +p icked +Ġthan king +Ġre cl +ĠL AST +Ġsupp ressing +il ic +Ġantidepress ant +ĠLis bon +Ġth or +Ġsp a +Ġking doms +ĠPear ce +em o +Ġpl ung +Ġdiv est +Ġ ******************************** +b is +osp els +ad r +Sp irit +hall a +P ink +end ez +Ġresurrect ed +esc ape +ĠRosen stein +Ġge ological +Ġnecess ities +Ġcarn iv +ĠE lys +ĠBar ney +Ġ29 6 +dig y +ST ON +D OWN +Ġmil estones +Ġk er +Ġdismant ling +Ġre prim +Ġcross ings +19 45 +Ġpatri archy +Ġblasp hemy +Ġ3 59 +met ry +ĠOb esity +ĠDiff erences +bl ocking +ãĥķ ãĤ¡ +ich ita +ĠSab ha +ph alt +ĠCol o +ual a +effic ients +ĠMed ina +con sole +55 7 +ĠHann ibal +ĠHab it +ĠF ever +Ġthen ce +Ġsyn agogue +Ġessential s +Ġw ink +ĠTr ader +ID A +ĠSp oiler +ĠIceland ic +ĠHay ward +Ġpe ac +Ġmal ice +Ġflash back +Ġth w +Ġlay offs +L iquid +Ġtro oper +Ġh inge +ĠRead ers +Ph ill +ĠB auer +Cre ated +Ġaud its +ac compan +Ġunsus pecting +ier a +6666 6666 +Ġbro ch +Ġapprehend ed +ĠM alk +cer ning +ĠCod ex +O VER +M arsh +ĠD eng +ĠExp ression +Ġdisrespect ful +Ġasc ending +t ests +ĠPlaint iff +ster y +ĠAl ibaba +din and +ĠDem psey +Applic ations +mor al +Ġthrough put +Ġquar rel +Ġm ills +Ġhe mor +ĠC ASE +terror ist +st im +ifest yle +ro zen +CE PT +Ar k +u ci +lect ic +Ġirrit ating +she ets +A y +Ġrede emed +Ġhorn y +ĠTe ach +ĠS ear +dem ocracy +4 65 +ĠRest ore +Ġstand by +ĠP is +iff in +Ġsleep y +Ġextr ater +Ġcompl iments +Fram eworks +Ġinstall s +Ġb anging +sur face +found land +Ġmetaph ysical +Ġ28 3 +oul s +dev ices +Ar gs +ĠSac rifice +ĠMcC orm +es on +Cons ervative +ĠM ikhail +see ing +is ively +ĠRo oms +ĠGener ic +Ġenthusi astically +Ġgri pped +Ġcomed ic +ĠElectric ity +Ġgu errilla +Ġdec oration +ĠPerspect ive +Ġconsult ations +Ġun amb +Ġplag iar +Ġmagic ian +Ġe rection +ĠTour ism +or ied +ro xy +11 00 +T am +Ī è +Î ³ +× ª +ĠPred ators +Nit rome +Ġtelesc opes +project s +Ġun protected +Ġst ocked +ĠEnt reprene +nex pected +Ġwast ewater +V ill +Ġint imately +Ġi Cloud +ĠConst able +Ġspo of +Ġne farious +Ġfin s +Ġcens or +ĠMod es +ĠEs per +ar bon +Ġinter sections +Ġlaud ed +Ġphys i +Ġgener ously +ĠThe Nitrome +ĠTheNitrome Fan +Ġar isen +ĠÙ Ī +Ġg lands +ĠPav ilion +ĠGu pta +Ġuniform ly +Ġr amps +ri et +ĠWH EN +ĠVan essa +Ġrout ed +Ġlim p +ĠC PI +p ter +int uitive +Ġv aping +Ġexperiment ed +ĠOlymp us +ĠAm on +Ġsight ing +Ġinfiltr ate +ĠGentle man +Ġsign ings +ĠMe ow +ĠNav igation +che cks +4 33 +Ġel apsed +ĠBulg arian +esp ie +ĠS OM +d uring +Ġsp ills +anc a +ĠPly mouth +M AL +Ġdomest ically +ĠWater gate +ĠF AM +k illed +ed ited 
+ĠYour self +Ġsynchron ization +ĠPract ices +ST EP +Ġgen omes +ĠQ R +not ice +Ġloc ating +z in +Ġ3 29 +al cohol +Ġk itten +V o +Ġr inse +Ġgrapp le +ĠSc rew +ĠD ul +A IR +Ġle asing +ĠCaf é +Ġro ses +ĠRes pect +Ġmis lead +Ġperfect ed +Ġnud ity +Ġnon partisan +ĠCons umption +Report ing +Ġnu ances +Ġdeduct ible +ĠSh ots +Ġ3 77 +Ġæ ľ +ano oga +Ben ef +ĠB am +ĠS amp +if ix +Ġgal van +ĠMed als +rad ius +Ġno bles +Ġe aves +igr ate +K T +ĠHar bour +u ers +Ġrisk ed +re q +Ġneuro t +get table +ain a +Rom ney +Ġunder pin +Ġlo ft +ĠSub committee +ĠMong ol +b iz +Ġmanif ests +ass isted +ĠG aga +Ġsy nergy +Ġreligious ly +ĠPre f +ĠG erry +T AG +ĠCho i +4 66 +beh ind +ĠO u +Gold Magikarp +Ġhemor rh +R iver +Ġtend on +Ġinj ure +ĠF iona +Ġp ag +Ġag itation +|| || +ur an +ĠE SA +Ġest eem +Ġdod ging +Ġ4 12 +r ss +Ġce ases +ex cluding +Ġint akes +Ġinsert s +Ġemb old +ĠO ral +up uncture +4 11 +ĠUn ified +ĠDe le +Ġfurn ace +ĠCoy otes +ĠBr ach +L abor +Ġhand shake +Ġbru ises +Gr ade +éĹ ĺ +ĠGram my +ile en +St ates +ĠScandinav ian +ĠKard ash +8 66 +Ġeffort lessly +ĠDI RECT +ĠTH EN +ĠMe i +ert ation +19 68 +Ġgro in +w itch +Requ irements +98 5 +Ġroof s +Ġest ates +ĠH F +Ġha ha +Ġdense ly +ĠO CT +Ġpl astics +Ġincident ally +ĠTr acks +ĠTax es +Ġch anted +Ġforce ful +ĠBie ber +ĠK ahn +K ent +ĠC ot +lic ts +F ed +Ġhide ous +ĠVer d +ĠSynd icate +ĠIl legal +J et +ĠD AV +re asonable +c rew +Ġfundamental ist +Ġtruth ful +ĠJ ing +Ġl il +Ġdown ed +Ġen chanted +ĠPolic ies +ĠMcM aster +ĠH are +ides how +Ġpar ams +en cers +gorith m +Ġallow ances +Ġturb ulent +Ġcomplex ities +ĠK T +Ġ3 37 +ĠGen etic +F UN +D oug +t ick +Ġg igs +ument hal +Ġpatriarch al +Ġcal c +, ... +Ġc out +ĠGu an +Ġpath ological +ĠR ivals +Ġunder rated +Ġflu orescent +ĠJ iu +arna ev +ĠQu an +Ġ4 29 +Ġ ਠ+M ario +Con struct +ĠC itation +ĠR acial +ĠR SA +ĠF idel +Ġ3 95 +Person ally +C ause +à » +rad ical +in en +Ġvehement ly +ĠPap a +Ġintern ship +Ġfl akes +ĠRe ck +Luck ily +B ra +20 20 +rav ings +R N +W onder +Ser iously +Ġre usable +Ġpoll uted +ĠP eng +le igh +ind le +Ġcircuit ry +ĠMad onna +ĠB ART +Res idents +att ribute +Phil adelphia +Cl ub +Ġplan ner +Ġfr antically +Ġfaith fully +ĠTerrit ories +ĠL AT +ĠAnders en +an u +ĠP ARK +ĠS ora +i age +ĠPlay offs +ĠG CC +4 27 +Ġab norm +ĠL ever +Ġdisob edience +As ync +ĠShe a +V ert +Ġsk irts +ĠSaw yer +x p +Ġwors ening +Ġsc apego +ĠAng le +oth al +Ġtro ve +ĠSt y +ĠN guyen +mar ine +ide on +Dep ths +Bl og +ĠIll uminati +Ġtract s +Ġorgan ise +Ġo str +F s +Ġlever aging +ĠD aredevil +as ar +Ġl ang +Ġex termin +urs ions +ĠRom o +ãĤ¤ ãĥĪ +Ġcont ended +Ġencounter ing +ĠTable t +ĠAltern ate +sk ill +Ġswe ets +Ġco hesive +cap acity +Ġrep ud +Ġl izard +ro o +Ġpilgr ims +ĠR uff +ĠInstr ument +ĠLog o +uit ous +E H +Ġsales man +Ġank les +L ed +ĠPat ty +ud os +Own er +Ġdiscrep ancies +k j +M U +Ġuncond itional +Dragon Magazine +i ard +O ak +ĠConvers ation +be er +ĠOs aka +D elta +us ky +Ġsecret ion +Ġpl aza +Ġm ing +Ġde pletion +ĠM ous +ĠI TS +ĠH imal +ĠFle ming +Ġcyt ok +ĠH ick +Ġbat ters +ĠInt ellectual +6 75 +é r +IS ION +ĠQu entin +ĠCh apters +ih adi +Ġco aster +WAY S +ĠL izard +ĠY or +and ering +S kin +ha ust +ab by +Ġportray ing +Ġwield ed +d ash +Ġprop onent +Ġr ipple +Ġgrap hene +Ġfly er +Ġrec urrent +Ġdev ils +Ġwater fall +æĺ ¯ +go o +Text Color +Ġtam pering +IV ES +TR UMP +ĠAb el +ĠS AL +ĠHend ricks +ĠLu cius +b ots +Ġ40 96 +IST ORY +Gu est +ĠN X +in ant +Ben z +ĠLoad ed +ĠCle ver +t reatment +Ġta vern +Ġ3 39 +ĠT NT +ific antly +Tem perature +F el +Ġunder world +ĠJud ges +Ġ< + +Ġst ump +Ġoccup ancy +Ġab er +ĠF inder +) ", 
+ĠN unes +res et +in et +ect omy +Ġwell ness +ĠP eb +quart ered +and an +Ġneg atives +ĠTh iel +ĠCl ip +ĠL TD +Ġbl ight +Ġreperto ire +K yle +Ġqu er +ĠC es +Ġha pl +98 9 +ĠTh ames +isc opal +Des k +ivari ate +ĠEx cellence +found ation +Ġâ ĩ +X i +Ġmyster iously +esty les +Ġper ish +ĠEng els +ĠDE AD +09 0 +}} } +ĠUn real +Ġrest less +ID ES +orth odox +ĠInter mediate +Ġdin ners +ĠTr out +ĠSe ym +ĠHall s +og ged +Ġtraged ies +Ġdid nt +67 6 +Ġail ments +Ġobserv able +ĠV ide +ad apt +ĠD usk +Ġprofessional ism +ĠPres cott +ĠInd ies +p ox +ĠMe hran +W ide +Ġend emic +ĠPar an +B ird +Ġped als +ĠI U +ĠAdam ant +ĠH urt +Ġcorrel ates +urd en +Ġspons oring +cl imate +ĠUnivers ities +ĠK not +enn es +ĠDam ian +ĠAx el +S port +Ġbar b +ĠS no +sh own +ste en +ud ence +Ġnon violent +Ġhom ophobia +Ġbiom ass +ĠDet ail +Ġsrf N +ĠT une +accompan ied +I ENCE +Al bert +ĠMong o +z x +ĠCer berus +or bit +c ens +Ġsl ay +SH ARE +H Y +Ġb rawl +ĠPro be +Ġnonex istent +ĠClare nce +ĠBlack burn +Ġport als +ĠR ita +ĠRem ain +ĠLe vant +Ġtrick ed +ĠF erry +aver ing +ĠStraw berry +ĠAn swers +Ġhorrend ous +ĠA man +Supp lement +ĠT oad +Ġpe eled +Ġman oeuv +ĠU zbek +mond s +ĠH ector +Ġ40 2 +pe es +fix es +Ġd j +Ġres umes +Ġaccount ant +Ġadvers ity +Ġham pered +ĠL arson +Ġd oping +part s +H ur +Ġbe arded +Ġy r +ĠPlug in +å¥ ³ +Ġ/ ** +rol ley +Ġwaters hed +ĠSub mission +if lower +AS C +Ġcho ir +Ġsculpt ures +m A +incre asing +ai i +Ġsne akers +Ġconfront s +ĠEle phant +ĠEl ixir +Ġrec al +ĠT TL +w idget +ĠW ax +ĠGr ayson +Ġha irst +Ġhumili ated +ĠWAR N +app iness +ĠT TC +F uel +Ġpol io +Ġcomplex es +Ġbab e +ĠX IV +P F +). [ +P arts +Ġ4 35 +M eg +ĠY ards +ĠAL P +Ġy ells +Ġprin ces +Ġbull ies +ĠCapital ism +ex empt +FA Q +ĠSp onge +ĠAl a +Ġpleas antly +Ġbu f +Ġden ote +Ġunp ublished +Ġkne eling +asc a +Ġl apse +al ien +99 4 +Ġrefere es +ĠLaw yers +S anta +Ġpuzz ling +ĠProm etheus +ĠPh araoh +ĠDel ay +Ġfacilit ates +ĠC ES +Ġjew els +Ġbook let +ond ing +Ġpolar ization +ĠMor an +ĠSal ad +ĠS OS +ĠAdv ice +PH OTOS +IC AN +iat ures +ex press +ĠWonder land +ĠC ODE +ĠCL ASS +9 75 +Ġg rep +ĠD iesel +ĠGl ac +! ?" 
+Ġr m +o ine +disc rimination +ĠN urse +m allow +Ġv ortex +ĠCons ortium +Ġlarge Download +stra ight +augh lin +G rad +Ġpublic ized +ĠW aves +ĠRed d +Ġfest ivities +ĠM ane +ar ov +Ġfleet ing +ĠDr unk +ug en +C ele +Ġchromos omes +ĠD OT +-+-+ -+-+ +Ġbus iest +ĠBe aver +Sy rian +ĠK yr +k as +ĠCross Ref +19 50 +76 01 +Ġrepe aling +ĠWin ners +ĠMac ro +ĠD OD +bl ance +S ort +64 1 +Ġmet re +ĠD irk +Ġgo ggles +Ġdraw backs +Ġcomplain ant +Ġauthor izing +Ġantit rust +oper ated +Ġm ah +Ġexagger ation +Am azing +ĠSer aph +Ġha ze +w ow +Ġextingu ished +Ġcan yon +ĠB osh +Ġv ents +Ġsc rape +Cor rect +4 26 +Ġav g +Dem and +ĠâĪ ¼ +Ġmicrobi ota +"} ]," +ĠSt ev +B io +ĠPlan es +Ġsuggest ive +Ġdec ipher +ĠRefuge e +ĠKe jriwal +ĠGreen peace +Ġdecl ass +ĠSound ers +Ġth o +Ġdec rypt +Ġbr ushing +ĠJane iro +ip op +S i +8 77 +ĠGeoff rey +Ġc pu +ĠHaz el +Ġview points +Ġcris py +ĠNot ification +Ġsold er +ĠMod est +ĠHem isphere +Ġcass ette +in cludes +Ġident ifiers +ĠC ALL +in cent +T odd +ĠSwe ep +Ġ3 34 +b oss +Ġsm ir +gin x +Ġtown ship +Ġg rieving +ĠMos que +Net flix +AS ED +ĠMillenn ials +oc om +19 67 +Ġbold ly +s leep +Ġes che +arij uana +Ġsw irl +ĠPen al +Ġneglig ent +ĠStephen son +K ER +ĠZ oro +ris is +Ġlocal ization +ĠSeym our +ĠAng lic +red itation +prot ection +ĠPa ige +Ġo mit +ĠR ousse +ĠT ub +Ġinv itations +t ty +Ġm oss +ph ysical +C redits +Ġan archy +Ġchild care +Ġl ull +ĠM ek +ĠL anguages +lat est +ĠSan ford +Ġus ability +Ġdiff use +ĠD ATA +Ġsp rites +ĠVeget a +ĠProm otion +ãĥ¼ ãĤ¯ +rict ing +z ee +Tur kish +ĠTD s +pro ven +57 1 +Ġsmug glers +707 10 +Ġreform ed +ĠLo is +Ġun fl +ĠWITH OUT +ĠReturn ing +ann ie +ĠTom as +Fr anc +ĠProf it +ĠSER V +ĠR umble +ik uman +es an +Ġt esters +Ġgad get +Ġbrace let +ĠF SA +comp onent +Ġparamed ics +Ġj an +ĠRem em +ĠSk inner +Ġl ov +ĠQu ake +rom a +Ġfl ask +Pr inc +Ġover power +Ġlod ging +ĠK KK +ret te +Ġabsor bs +w rote +Ġ ," +K ings +ĠH ail +ĠFall ing +xt ap +ĠHel ena +ire ns +L arry +Ġpamph let +ĠC PR +G ro +ĠHirosh ima +Ġhol istic +". [ +Ġdet achment +Ġas pire +Ġcompl icit +ĠGreen wood +Ġresp awn +ĠSt upid +ĠFin ished +f al +b ass +Ġab hor +Ġmock ery +ĠFe ast +VID EO +Ġcon sec +ĠHung ry +P ull +ĠH ust +it ance +? 
ãĢį +) -- +ĠPar allel +con v +4 69 +ha ar +w ant +P aper +m ins +ĠTor o +ĠTR UMP +ĠR ai +D W +ĠW icked +ĠL ep +Ġfun ky +Ġdetrim ent +ios is +ache v +Ġde grade +im ilation +Ġret ard +Ġfrag mentation +Ġcow boy +ĠY PG +ĠH AL +Parent s +ĠS ieg +ĠStra uss +ĠRub ber +× IJ +Fr ag +Ġp t +Ġoption ally +ĠZ IP +ĠTrans cript +ĠD well +88 2 +M erc +ĠM OT +ãĥ¯ ãĥ³ +Ġhun ts +Ġexec utes +In cludes +Ġacid ic +ĠRespons ibility +ĠD umb +we i +And erson +ĠJas per +ight on +abs olutely +Ad ult +Ġpl under +Mor ning +ĠT ours +ĠD ane +Î º +ĠT EST +ĠG ina +Ġcan ine +aw an +Ġsocial ists +ĠS oda +Ġimp etus +ĠSupplement ary +oli ath +ĠKinn ikuman +mitted ly +second s +Ġorganis ers +Ġdocument aries +Vari able +GRE EN +Ġres orts +Ġbr agging +Ġ3 68 +Art ist +w k +bl ers +Un common +ĠRet rieved +Ġhect ares +Ġtox in +r ank +Ġfaith s +ĠG raphic +Ġve c +ĠL IA +Af rican +Ġard ent +end iary +L ake +ĠD OS +cient ious +ĠOk awaru +ĠAll y +ĠTim eline +D ash +ĠI c +contin ue +Ġt idy +Ġinstinct ively +ĠP ossibly +ĠOut door +ĠWould n +Ġl ich +ĠBr ay +ĠA X +Ġà ī +Ġ+ # +\ ' +Direct ory +ab iding +Ġf eral +ic ative +but t +Ġper verse +S alt +Ġwar ped +Ġnin eteen +Ġcabin ets +Ġsrf Attach +ĠSl oan +Ġpower ing +reg ation +F light +se vere +Ġst ren +Ġc og +ap ache +Ġâ Ŀ +Ġcaf eteria +p aces +ĠGrim oire +uton ium +Ġr aining +Ġcir cling +Ġlineback ers +c redit +Ġrep atri +ĠCam den +lic ense +Ġly ric +Ġdescript or +Ġval leys +Ġre q +Ġback stage +ĠPro hibition +ĠK et +Op ening +S ym +æĸ ¹ +Ġserv ings +Ġoverse en +Ġaster oids +ĠMod s +ĠSpr inger +ĠCont ainer +è » +ĠM ens +Ġmult im +Ġfire fighter +pe c +Ġchlor ine +Ð ¼ +end i +Ġsp aring +Ġpolyg amy +ĠR N +ĠP ell +Ġt igers +Ġflash y +ĠMad ame +S word +Ġpref rontal +Ġpre requisite +uc a +Ġw ifi +Ġmiscon ception +Ġharsh ly +ĠStream ing +ot om +ĠGiul iani +foot ed +Ġtub ing +ind ividual +z ek +n uclear +m ol +Ġright ful +49 3 +Ġspecial ization +Ġpassion ately +ĠVel ocity +ĠAv ailability +T enn +Ġl atch +ĠSome body +Ġhel ium +cl aw +Ġdi pping +XX X +Ġinter personal +7 10 +Ġsub ter +Ġbi ologists +ĠLight ing +Ġopt ic +Ġden im +end on +ĠC orm +Ġ3 41 +ĠC oup +Ġfear less +Ġal ot +ĠCliff ord +ĠRun time +ĠProv ision +up dated +lene ck +Ġneur on +Ġgrad ing +ĠC t +sequ ence +in ia +con cept +Ġro aring +ri val +ĠCaucas ian +Ġmon og +key es +Ġappell ate +Ġlia ison +EStream Frame +ĠPl um +! . 
+Ġsp herical +Ġper ished +Ġbl ot +Ġben ches +Ġ4 11 +Ġpione ered +Ġhur led +Jenn ifer +ĠYose mite +Ch air +Ġreef s +Ġelect or +ĠAnt hem +65 2 +Ġun install +Ġimp ede +Ġbl inking +Ġgot o +Dec re +A ren +Ġstabil ization +ĠDis abled +ĠYanuk ovych +Ġoutlaw ed +ĠVent ura +ten ess +Ġplant ation +Ġy acht +ĠHu awei +Ġsol vent +Ġgr acious +Ġcur iously +Ġcapac itor +Ġc x +ĠRef lex +Ph ys +ĠC f +pt in +cons ervative +Ġinv ocation +c our +F N +ĠNew ly +H our +As ian +ĠLe ading +ĠAer ospace +An ne +Ġpre natal +Ġdeterior ating +H CR +ĠNorm andy +ol ini +ĠAm bro +9 10 +Ġset backs +ĠT RE +Ġs ig +ĠSc ourge +59 7 +79 8 +Game play +Ġm sec +M X +Ġprice y +ĠL LP +aker u +Ġover arching +ĠB ale +Ġworld ly +Cl ark +Ġscen ic +Ġdisl iked +ĠCont rolled +T ickets +ĠE W +ab ies +ĠPl enty +Non etheless +Ġart isan +Trans fer +ĠF amous +Ġinf ield +ble y +Ġunres olved +ĠML A +ãĤ Ĥ +Cor rection +Ġdemocr at +ĠMore no +ro cal +il ings +Ġsail or +Ġr ife +h ung +Ġtrop es +Ġsn atched +ĠL IN +ĠB ib +ES A +ĠPre v +ĠCam el +run time +Ġob noxious +4 37 +Ġsum mers +Ġunexpl ained +ĠWal ters +cal iber +Ġg ull +ĠEnd urance +ä½ ľ +Ġ3 47 +Ir ish +Ġaer obic +Ġcr amped +ĠHon olulu +à © +us erc +ec ast +AC Y +ĠQu ery +ãĤ¹ ãĥĪ +Bet a +Ġsuscept ibility +ĠSh iv +ĠLim baugh +Ġà ĸ +ĠN XT +ĠM uss +ĠBrit ons +ES CO +EG IN +Ġ% % +Ġsec ession +ĠPat ron +ĠLu a +n aires +ĠJPM organ +us b +ocy te +Ġcouncill ors +ĠLi ang +f arm +Ġnerv ously +Ġattract iveness +ĠK ov +j ump +Pl ot +Ġst ains +ĠStat ue +ĠApost les +he ter +ĠSUP PORT +Ġoverwhel m +Y ES +Ġ29 1 +d ensity +Ġtra pping +M it +Ġf ide +ĠPam ela +atl antic +Dam n +Ġp ts +OP A +Ġserv icing +Ġoverfl owing +ul o +ĠE rit +t icket +light ing +ĠH mm +ãĥ¼ ãĥ« +im oto +Ġchuck le +4 23 +ãģ ķ +sh ape +Ġque ues +Ġanch ors +ãĤ¼ ãĤ¦ãĤ¹ +F er +Ġaw oke +Ġ6 66 +h ands +Ġdiver gence +Ġ50 5 +T ips +Ġdep ot +Ġske w +ĠDel iver +op ot +Ġdiv ul +ĠE B +uns igned +ĠUn i +X box +Ġfor ks +Ġ7 02 +å ¯ +Ġpromot ers +ĠV apor +Ġlev ied +sl ot +Ġpig ment +Ġcyl inders +C RE +Ġsn atch +Ġperpet ually +Ġl icking +ĠFe et +ĠKra ken +ĠHold en +ĠCLS ID +m r +Ġproject or +Ġden otes +Ġchap el +ĠTor rent +b ler +R oute +ĠDef endant +ĠPublisher s +ĠM ales +ĠInn ov +ĠAg ility +rit er +ty mology +st ores +L ind +Ġf olly +ĠZur ich +B le +Ġnurt ure +Ġcoast line +uch in +D omin +Ġfri vol +ĠCons olid +res ults +M J +Ġphyl ogen +Ġha uled +ĠW iley +ĠJess ie +ĠPrep are +ĠE ps +Ġtreasure r +I AS +Ġcolon ists +Ġin und +ĠWW F +ĠCon verted +6 000 +out side +ĠApp earance +ĠRel ic +ĠM ister +s aw +Ġresult ant +Ġadject ive +ĠLaure l +ĠHind i +b da +Pe ace +Ġreb irth +Ġmembr anes +Ġforward ing +Ġcoll ided +ĠCar olyn +K ansas +5 99 +ĠSolid GoldMagikarp +Be ck +Ġstress ing +ĠGo o +ĠCooper ative +Ġf s +ĠAr chie +L iter +ĠK lopp +J erry +Ġfoot wear +War ren +Ġsc ree +h are +Under standing +P ed +Ġanth ology +ĠAnn ounce +M ega +Ġflu ent +Ġbond age +ĠDisc ount +il ial +C art +ĠNight mares +Sh am +ĠB oll +uss ie +H ttp +Atl anta +Ġun recogn +ĠB id +Ġunder grad +Ġforg iving +ĠGl over +AAAA AAAA +4 45 +V G +pa io +kill ers +Ġrespons ibly +Ġmobil ize +Ġeffect ed +ĠL umin +Ġk ale +Ġinfring ing +ann ounced +Ġf itt +b atch +ĠT ackle +ĠL ime +ĠAP P +uke mia +Ġrub y +Ġex oner +ĠCas ual +0 70 +Ġpel vic +Ġautom ate +ĠK ear +ĠCoast al +Ġcre ed +Ġbored om +ĠSt un +ri ott +Ĥ İ +Ġregener ate +Ġcomed ians +ĠOP ER +Sp ons +id ium +on is +L ocated +05 7 +Ġsusp ense +ĠD ating +C ass +Ġneoc ons +ĠShin zo +Ġaw oken +ch rist +ĠMess ages +att led +ĠSpr ay +ĠSp ice +C W +Ġshield ing +ĠG aul +Am id +Ġparam ilitary +Ġmult if +ĠTan ner +il k +Ġgodd amn +g ements +Ġbe friend +m obi +Ġ3 88 +fold 
er +acc a +Ġins in +g ap +N ev +fif th +Ġpsychiat ry +b anks +TH IS +Ġhar b +ac qu +Ġfac ade +ĠPower Point +80 3 +Ġbl uff +Sh ares +Ġfavor ing +El izabeth +Ãį Ãį +Ġr anger +77 2 +ĠAr che +h ak +ĠGen etics +ĠF EMA +Ġev olves +Ġest e +ĠP ets +ĠM é +ĠInterest ing +ĠCanter bury +ch apter +ĠStar fleet +Sp anish +Ġdraw back +ĠNor wich +9 70 +n orth +ag anda +Ġtransform ative +ram ids +bi ology +ad ay +Ġpropag ation +ĠGam ma +ĠDen ise +ĠCalcul ator +ent imes +ĠB ett +Ġapp endix +ĠHD D +AK ING +Ġst igmat +Ġhol ster +Ġord inarily +Ch ance +ĠCont rary +Ġad hesive +Ġgather s +6 12 +re au +ony ms +ew ays +Ġindu ces +Ġinterchange able +se m +Wh it +Ġtr ance +Ġincorpor ation +ĠExt ras +Fin ancial +Ġawkward ly +ĠStur geon +ĠH Y +Norm ally +ĠEnd ing +ĠAss ist +enc rypted +Ġsub jug +Ġn os +Ġfan atic +C ub +C U +?" . +Ġirre versible +å Ĥ +03 1 +ĠH AR +sp read +ul ia += $ +Sc ope +L ots +Ġlif estyles +ol on +Ġf eds +Ġcongrat ulate +web kit +Ġindist inguishable +ĠSw ing +Ġcommand ments +qu ila +ab ella +m ethyl +ann abin +Ġo vere +Ġlob ster +ĠQU EST +ĠCONT IN +bern atorial +:::: :::: +ĠTra ve +ĠSam oa +AN I +75 2 +Ð ´ +userc ontent +ĠMod erate +y eah +ĠK itt +Ġwe e +Ġstuff ing +ĠInter vention +ĠD ign +Ġware houses +ĠF iji +Ġpel lets +Ġtake away +ĠT ABLE +ĠClass ical +col lection +Ġland fall +ĠMus cle +Ġsett les +ĠAD V +Ġ3 44 +L aura +Ġf ared +ĠPart ial +4 36 +oss ibility +ĠD aly +ĠT arant +ĠFu ji +am l +c ence +55 1 +ĠProced ures +ĠO CD +ĠU D +t in +Q UI +ach o +4 38 +Ġgl itches +Ġenchant ment +Ġcalcul ates +IR O +ĠH ua +alys es +ĠL ift +um o +Ġle apt +Ġhypothes ized +ĠGust av +it ans +VERS ION +æ ł +Rog er +Ġr and +ĠAd apter +Ġ3 31 +ĠPet ition +k ies +M ars +Ġunder cut +ze es +ĠLy ons +ĠDH CP +Miss ing +Ġretire es +Ġins idious +el i +> ) +. ãĢį +Ġfinal ists +ĠA ure +Ġacc user +Ġwas tes +ĠY s +ĠL ori +Ġconstitu encies +Ġsupp er +Ġmay hem +or ange +Ġmis placed +Ġmanager ial +Ġex ce +ĠCL I +Ġprim al +ĠL ent +Cry stal +h over +ĠN TS +end um +Ġd w +ĠAl c +n ostic +Ġpres erves +ĠTs arnaev +Ġtri pled +rel ative +Arc ade +k illing +ĠW EEK +ĠH anna +D ust +Com pleted +ģ « +Ġappro ves +ĠSur f +ĠLuther an +ven ants +Ġrobber ies +we ights +soft ware +at ana +ug al +Ġgrav y +ĠC ance +OLOG Y +ly ak +Ton ight +Ġunve il +Ġ19 04 +ĠMin ion +ent ious +st ice +pack ages +ĠG EAR +Ġg ol +ĠHutch inson +ĠProf ession +ĠG UN +ĠDiff erence +ĠTsuk uyomi +ĠLes bian +6 70 +Ġfug itive +ĠPlan etary +-------------------------------- ------------------------ +Ġacc rued +Ġch icks +Ġsto pp +Ġblock ers +C od +Ġcomment ers +ĠSomew here +ĠPhot ographer +the me +Ġmay oral +w u +Ġanten nas +Ġrev amped +ĠSubject s +it é +im ura +Ġentr ances +liter ally +Ġten ets +ĠO MG +ĠMP H +ĠDon key +ĠOff ense +Ġ" + +Sn ap +ĠAF B +Ġan imate +ĠS od +His panic +Ġinconsist ency +D b +F Y +Ex port +Ġa pe +Ġpear l +ib el +ĠPAC s +Ġ{ \ +Ġact u +ĠHS BC +camp us +Ġpay off +Ġde ities +ĠN ato +ou ple +Ġcens ored +ĠCl ojure +Ġconf ounding +en i +Ġreck on +op he +Ġspot ting +Ġsign ifies +Ġprop el +Ġfest ive +S uggest +Ġpled ging +ĠB erman +Ġrebell ious +Ġovershadow ed +Ġinfiltr ated +j obs +67 2 +Ġscal able +Ġdomin ion +ĠNew foundland +ĠMead ow +Ġpart itions +AM I +Ġsupplement ary +str ument +Ġhair y +Ġperpet uate +Ġnuts hell +ĠPot ato +ĠHob bit +Ġcur ses +Flo at +Ġquiet er +Ġfuel ing +Ġcaps ules +ĠL ust +ĠH aunted +Exec utive +Ġchild birth +G re +Ġrad iant +å İ +Ġm alls +Ġin ept +ĠWarrant y +Ġspect ator +E h +t hens +Ġculmin ating +æ © +ary a +ãĤ ® +ilit arian +ĠOR IG +ĠSp ending +pt ives +ĠS iren +ĠRec ording +ay ne +Ġv im +Ġspr ang +T ang +ĠM FT +mor ning +ĠWe ed +m peg 
+cess ion +ĠCh ung +7 30 +w arning +56 2 +handed ly +P oor +P olitics +: # +Ġp ian +Ġfec es +ĠDocument ation +Ġban ished +Ġ3 99 +ĠAR C +Ġhe inous +J ake +ĠAm ir +way ne +v re +os henko +Ġnotebook s +Ġfound ational +Ġmarvel ous +ixt ape +Ġwithdraw als +Ġh orde +ĠD habi +is able +ĠK D +Ġcontag ious +ĠD ip +ĠAr rows +Ġpronoun s +Ġmorph ine +ĠB US +68 2 +Ġk osher +fin ished +ĠInstr uments +Ġf used +yd en +ĠSal mon +F ab +aff ected +K EN +C ENT +Dom ain +Ġpoke mon +ĠDr inking +G rowing +ĠInvestig ative +ĠA ether +em i +Ġtabl oid +Ġrep ro +ĠNot withstanding +ĠBers erker +Ġdram as +Ġclich é +Ġb ung +ĠU RI +ĠD os +0 44 +Ġpast ors +Ġl s +Ġac rylic +aun ts +Ed ward +Ġmajor ities +B ang +Ġfield ing +ĠRepl acement +ĠAl chemy +pp ard +ĠRome o +ĠSan ct +ĠLav rov +ib ble +Inst ruct +Ġimp ractical +ĠPlay boy +ce phal +Ġsw aps +Ġk an +ĠThe o +Ġillust rating +Ġdismant led +ĠTrans gender +ĠG uth +UG H +Ġtriumph ant +Ġencomp ass +Ġbook mark +udd in +j er +Ġpred icate +ES H +Ġwhen ce +ĠAB E +Ġnon profits +Se qu +Ġdi abetic +Ġp end +Ġheart felt +sh i +Ġinter acts +ĠTele com +Ġbombard ment +dep ending +ĠLow ry +ĠAd mission +ĠBl ooming +ust ration +ene gger +B rew +Ġmol ten +ĠNer d +P IN +âĸ Ģ +ave ment +Ġtou red +Ġco efficients +ĠTray von +ans son +Ġsand y +t old +fl ows +Ġpop ulous +ĠT inder +ĠBl iss +R achel +Min imum +Ġcontest ant +ĠRed uce +ĠMor se +ĠGrass ley +ĠClick er +Ġexp r +Ġs incerity +Ġmar qu +Ġelic it +ĠPro position +ĠDemon ic +Ġtac os +G reek +Ġpost war +Ġin sofar +ĠP ork +Ġ35 2 +doctor al +walk ing +Ġmid term +ĠSam my +sight ed +ĠTR ANS +ic i +AL D +ĠUS L +ĠF ISA +ĠAm pl +ĠAlex andra +ine lli +Tr ain +Ġsign ify +ĠVers us +Ġob fusc +Ġk h +Ġagg ro +ĠRen ault +Ġ3 48 +5 18 +ox icity +0 22 +ĠTw ist +Ġgoof y +D ynamic +Ġbrief ings +m ight +8 99 +Ġderog atory +T ro +Ġfor ging +ĠKor an +ĠMar ried +ĠBuc s +Ġpal ate +ĠCon version +m able +4 13 +Ġ( _ +Ġs iph +ĠN EO +col lege +Ġmarg inally +Ġfl irt +ĠTra ps +ĠP ace +é »Ĵ +Ġgoalt ender +Ġforb ids +Ġcler ks +ĠT ant +ĠRobb ins +ĠPrint ing +Ġpremie red +Ġmagn ification +ĠT G +ĠR ouse +ĠM ock +odynam ics +Ġpre clude +ism o +ĠPul itzer +Ġaval anche +ĠK odi +rib une +ĠL ena +Elect ric +Ġref inery +Ġend owed +Ġcounsel ors +Ġd olphin +ĠM ith +Ġarm oured +hib ited +Beg in +ĠP W +O il +ĠV or +ĠShar if +ĠFraz ier +est ate +Ġj ams +Pro xy +Ġband its +ĠPresbyter ian +ĠPrem iere +t iny +ĠCru el +Test ing +Ġhom er +ĠV ERS +ĠPro l +ĠDep osit +ĠCoff in +Ġsemin ars +Ġs ql +ĠDef endants +Altern atively +ĠR ats +ç « +ethy st +' > +Ġiss uer +58 9 +Ġch aired +ĠAccess ories +man ent +Ġmar row +ĠPrim ordial +C N +Ġlimit less +ĠCarn age +Ġund rafted +q v +IN ESS +on ew +Ġco hesion +98 7 +Ġne cks +Ġfootball er +ĠG ER +Ġdetect able +ĠSupport ing +ĠCS V +oc ally +k Hz +Ġund e +Ġsh one +Ġbud ding +tra k +Stand ing +ĠStar craft +ĠKem p +Ben ch +Ġthw arted +ĠGround s +ath i +L isa +Dial og +ĠS X +V ision +Ġingen ious +Ù IJ +Ġfost ering +ĠZ a +ĠIn gram +Ġ" @ +N aturally +6 16 +0 35 +ĠF AC +H mm +55 4 +Ġacceler ator +ĠV end +Ġsun screen +Ġtuber culosis +rav iolet +ĠFunction al +ĠEr rors +ed ar +19 66 +ĠSpect re +ĠRec ipes +88 5 +ĠM ankind +L iverpool +Ġ| -- +Ġsubst itutes +ĠX T +w ired +Ġinc o +ĠAf gh +E va +ic c +S ong +K night +Ġdilig ently +ĠBroad cast +A id +Ġaf ar +ĠH MS +aton in +ĠGr ateful +Ġfire place +ĠOm ni +e uro +ĠF RE +ĠSh ib +ĠDig est +t oggle +Ġheads ets +Ġdiff usion +ĠSqu irrel +ĠF N +Ġdark ened +out her +Ġsleep s +ĠX er +gun s +Ġset ups +Ġpars ed +Ġmamm oth +ĠCur ious +g ob +ĠFitz patrick +ĠEm il +im ov +........ ..... 
+ĠB enny +Second ly +Ġheart y +Ġcons on +st ained +Ġgal actic +cl ave +Ġplummet ed +Ġp ests +Ġsw at +Ġrefer rals +ĠLion el +h oly +Ġunder dog +ĠSl ater +ĠProv ide +ĠAm ar +ress or +å Į +ong a +Ġtim id +Ġp iety +ĠD ek +Ġsur ging +az o +Ġ6 10 +Ġdes ks +ĠSp okane +ĠAn field +Ġwars hips +ĠCob ra +Ġar ming +clus ively +ĠBad ge +ag ascar +ĠPR ESS +ĠMcK enzie +ĠFer dinand +burn ing +Af ee +Ġtyr ann +ĠI w +ĠBo one +100 7 +ĠRe pt +Ċ Âł +Ġcar avan +ĠD ill +ĠBundes liga +Ch uck +Ġheal er +ãĥ¼ãĥ Ĩ +ĠH obby +Ġneg ate +Ġcrit iques +section al +mop olitan +Ġd x +Ġouts ourcing +ĠC ipher +t ap +Sh arp +Ġup beat +Ġhang ar +Ġcru ising +ĠNi agara +Ġ3 42 +ill us +ĠS v +Ġsubt itles +Ġsqu ared +Ġbook store +Ġrevolution aries +ĠCarl ton +ab al +Ut ah +Ġdesp ise +ĠU M +cons ider +aid o +Ġc arts +ĠT urtles +Tr aining +Ġhonor ary + ¢ +Ġtri angles +4 22 +Ġreprint ed +Ġgrace ful +ĠMong olia +Ġdisrupt ions +ĠB oh +Ġ3 49 +Ġdr ains +Ġcons ulate +Ġb ends +Ġm afia +ur on +ĠF ulton +m isc +Ġren al +Ġin action +ck ing +Ġphot ons +Ġbru ised +ĠC odes +og i +Ġn ests +ĠLove ly +ĠLib re +ĠD aryl +Ġ# ## +S ys +. ," +Ġfree zes +est ablishment +and owski +Ġcum bers +ĠSt arg +ĠBom bs +Ġleg ions +Ġhand writing +Ġgr un +ĠC ah +sequ ent +Ġm oth +ĠMS M +Ins ert +F if +Ġmot el +Ġdex ter +ĠB ild +hearted ly +Ġpro pe +ĠText ure +ĠJ unction +ynt hesis +oc ard +ĠVer a +ĠBar th +Ġμ g +Ġl ashed +Ġ35 1 +ĠZ amb +ĠSt aples +ĠCort ex +ĠCork er +Ġcontinu um +ĠWR ITE +unt a +rid or +Ġde ems +0 33 +ĠG OLD +p as +Ġrep ressive +ãĥĨ ãĤ£ +Ġbaff led +Sc ar +Ġc rave +Ġ ______ +Ġentrepreneurs hip +ĠDirector ate +Ġ' [ +Ġv ines +Ġasc ended +ĠGR OUP +ĠGood bye +Ġdo gged +ãĥ´ ãĤ¡ +Man ufact +Ġunimagin able +ri ots +ier rez +Ġrel ativity +ĠCraft ing +ra ught +ud en +c ookie +Ġassass ins +Ġdissatisf ied +ac ci +Ġcondu it +Sp read +ĠR ican +n ice +izz le +Ġsc ares +ĠWH Y +ph ans +5 35 +Ġprot racted +ĠKrist en +5 36 +ĠSc rib +ĠNe h +Ġtwent ies +Ġpredic ament +Ġhandc uffs +Ġfruit ful +ĠU L +ĠLud wig +Ġatt est +ĠBre aker +Ġbi ologically +ĠDeal er +Ġrenov ations +f w +ess en +Al ice +ĠHen ri +Ġun ilaterally +ĠS idd +h ai +ĠSt retch +S ales +Ġcumbers ome +ĠJ avier +Ġtrend y +Ġrot ting +ĠChall enges +Ġscra ps +Ġfac ets +ĠVer onica +ĠVer ge +ĠS ana +Al ien +ĠR ih +Ġrad ial +ect ar +Ġ6 30 +cl i +Mar ie +Ġwild fire +ĠCat o +h ander +Ġwait ress +Ġch ops +ĠS ECTION +Ġblunt ly +ĠCat alog +n ian +stud y +Ġpat rolling +ĠT enth +nex us +ĠN ON +op sy +Ġsc athing +s ie +Ġdeterior ated +V B +Naz is +Ġdep ictions +Ġauthent icated +ĠCon ce +k rit +Ġpromul g +ĠL ONG +U FC +ĠVis itors +ĠRec all +Ġrehab ilit +ĠSL I +Ġglac ier +ĠB ite +Ġ50 3 +Ġvom it +Ġfer mented +ĠKh alid +Ġgrad ed +ĠMag icka +ĠIch igo +power ful +ic ators +75 3 +Ġsh rew +Ġ35 6 +Ġlegal izing +Ġall otted +ĠArch demon +ith ing +igg urat +V OL +Le od +Ġo ily +Ġindu cing +Ġamy gdala +Ġadm ins +ĠAcqu isition +C AN +Ġsche matic +Ġmo an +ĠCamer oon +Ġt ink +Ġmer ry +Ġbutter flies +ĠGo ff +Ġworks pace +ĠCor ona +Ġj avascript +ĠD olphin +ĠCant or +4 64 +to e +AP S +ĠAg ing +Ġpadd ed +ĠZ heng +ĠHe ld +Ġest ranged +Ġ7 70 +. 
} +ĠDun ham +Ġsm okes +Ġcap itals +und ai +Sh in +ĠFound ing +Ġent itle +Ġcenter piece +D iscover +Ġthere to +al ert +ĠN ou +ĠAnaly st +l c +F H +FI ELD +ĠP OV +gr ay +Ġar cs +ĠH OT +Ġr s +Ġoblig atory +ĠArchitect s +ĠS ven +ĠF EC +0 200 +Christ mas +ĠAlban ia +rat om +58 7 +Ġhard ships +Ġaut os +ĠCharg es +Ġap es +Ġ3 76 +wal let +Ġintox ication +Ġgobl in +Ġ5 70 +++++++++ ++++++++ +ĠYel p +ĠMag netic +ĠBr iggs +R ail +Ġspawn s +ĠW iggins +Ġshowc ased +Ġres orted +ub en +Ġwh ipping +Ġim itate +Ġdigest ion +ĠUS PS +ĠG est +Ġye a +ĠT ight +ind al +ic as +` . +C AST +'' ; +ĠF et +opath ic +In valid +Ġregrett ed +Ġbro ccoli +ĠSc ores +e ve +Ġpost ings +Ġaccum ulating +Ġneed less +elf th +Ġmay ors +Ġsc rib +Ġanecd otes +Ġbot ched +ĠRib bon +ĠConstant ine +i uses +ess es +Ġdev ise +Comp ared +Ġp udding +Ġg arg +Ġev oke +79 7 +Ġdet ox +9 09 +ĠPie ces +ĠMcC artney +Ġmet ast +ĠK rypt +P OR +Ġt ending +ĠMerch ants +Pro of +ĠV arg +ĠPort able +ãĥ¼ãĥĨ ãĤ£ +B rain +25 00 +Ġfol iage +Ø ¹ +Ġment ors +ĠA ires +Ġminimal ist +Ġing ested +ĠTro jan +ĠQ ian +inv olved +0 27 +Ġer oded +RA FT +Ġbl urry +M ob +Ġbuff et +ĠFn atic +ae a +KN OWN +ĠIn it +s afety +en um +ACT ION +ĠCrus her +ĠD ates +Ġ ................ +c alling +ak ov +Ġvent ured +Ġ5 55 +au ga +H art +ĠA ero +M AC +Ġthin ly +Ġar ra +ST ATE +ild e +ĠJac qu +ĠFem ales +Ġthe orem +Ġ3 46 +Ġsmart est +ĠPU BLIC +ĠK ron +ĠB its +ĠV essel +ĠTele phone +Ġdec ap +Ġadj unct +ĠS EN +mer ga +Ġred acted +Ġpre historic +Ġexplan atory +ĠRun s +ĠUtt ar +ĠM anny +ĠAUTH OR +ĠUnle ashed +ĠBow ling +be ans +79 3 +Ġunivers es +Ġsens it +ĠK ung +re peat +ctr l +Ġp aced +Ġfull er +Cl ock +Ġrec omb +ĠF aul +ĠB unker +Ġpool ed +Ġan a +ĠM outh +LL OW +hum ane +Ġbull do +ĠMicha els +f am +Ġwreck ed +Ġport rays +ĠWh ale +ĠH es +Ġguess es +ĠBrow se +ĠL APD +Ġconsequ ential +ĠInn ocent +ĠD RAG +Ġtrans gress +ĠO aks +Ġtri via +ĠRes on +ĠA DS +-- + +ĠT oll +Ġgrasp ing +ĠTHE M +ĠT ags +ĠCon clusion +Ġpract icable +Ġho op +Ġunintention ally +Ġign ite +ĠM ov +ur ized +le hem +Ter min +Ġcolour ful +ĠLin ear +ĠEll ie +G y +Ġman power +Ġj s +Ġem oji +ĠSHAR ES +_ . +0000 7 +Ġsophistic ation +Ġunders core +Ġpract ise +Ġbl ob +op ens +Uk raine +Ke eping +Y C +J R +ult imate +Cl aim +Ġautom obiles +99 3 +ste el +Ġpart ing +ĠL ank +... ? +Ġ38 5 +Ġremem brance +Ġe ased +Ġcov ari +ĠS ind +Effect ive +Ġdisse mination +ĠMo ose +ĠCl apper +br ates +App ly +Ġinv is +Ġwors ened +âĢĶ - +Ġlegisl ator +ĠL ol +ĠRow e +Ġdealers hip +um ar +id ences +Ġinvestig ates +Ġc ascade +Ġbid der +ĠB EN +Iron ically +Ġpres iding +Ġd ing +Ġcontrad icted +Ġshut s +ĠF IX +Ġ3 66 +Dist rict +Ġsin ful +ĠChar isma +o ops +Ġtot ality +Ġrest itution +ĠOpt imus +ĠD ah +Ġcl ueless +urn ed +Ġnut rit +Ġland owners +Ġfl ushed +Ġbroad en +m ie +Ġprint ln +Ġn ig +ĠCorp us +J en +Ġprot o +ĠWik imedia +ĠPal o +C OR +Ġstory lines +Ġevangel icals +ĠDar rell +Ġrot or +ĠH W +sk illed +ery l +Ġbe gg +ĠBl umenthal +Ġwe aving +Ġdown wards +ĠJack et +ĠANG EL +Te chnology +Ġes oteric +alde hyde +Ġfur iously +Ġforeign er +We ak +CH O +ĠH ound +Exper ience +ĠPlay station +ĠM IA +ĠU ng +cl oth +ag all +Ġcal ming +iz ens +St ruct +ĠW itches +ĠCeleb ration +Ġ........ ...... 
+pt roller +ĠTC U +Ġb unny +ãĥ į +ut orial +Ġup scale +ĠSt a +ĠCol ossus +Ġchlor ide +ĠZ ac +ĠRe asons +ĠBrook ings +ĠWH ITE +][ / +ĠL ose +9 05 +Ġunders ide +ern els +Ġv ape +do zen +upp et +ĠST OP +mat ical +ĠStat ements +hed dar +P AC +Custom er +Ġmem os +ĠP J +end ars +ĠLim its +l augh +Ġstabil ized +ĠALE C +Y A +Up grade +al am +Ġtechn o +Ġan ew +fore seen +Ġcolleg iate +ĠPy ro +ĠD ism +Ġfront line +Ġammon ia +I U +Qu ite +John ny +ass in +G OP +ĠSt yles +ĠSovere ign +acter ial +5 49 +ĠR IP +ĠL ists +Ġ3 64 +ĠRece p +s ocket +ĠByr d +ĠCand le +An cient +Ġappell ant +en forcement +ace a +ans ki +Ġold s +88 6 +Ġsl urs +Ġem pires +Ġbuck le +Ġalien ation +ĠAber deen +Ġunic orn +Ġoverr iding +ĠL X +pp a +Ġdesp ised +ĠB ugs +ĠB ST +S outhern +5 33 +Ġhall mark +ĠPost er +Ġstem med +Ġprincip als +ĠT ECH +ĠSand wich +It aly +Ġche esy +ĠSet TextColor +ĠProt ective +ĠC ohn +J O +apt op +Re ason +Lead er +ĠUnder stand +ĠFr idays +ĠContin uous +Ġcl ipping +ĠR ye +Ġber th +tim er +ann is +re act +Ġbuff alo +ĠPar as +Ġ6 55 +Ġpres ided +ĠSun rise +Ġve ts +Ġcl oves +ĠMcC ull +Stre ngth +G AN +Ġill iter +ĠPric ing +l é +Ġresist or +Ġbr un +ĠSuff olk +Ñ ĭ +ĠL iver +Re leased +Ġwhat s +8 60 +ĠMe asures +Ġden ouncing +ĠRy zen +Ġsou ven +Ġcareg ivers +ch ini +ĠScar lett +Ġt rough +Cong ratulations +Ġtax is +ĠTrad ition +j it +Ġtable top +Ġhither to +Ġdis information +off ensive +h ra +ĠDISTR ICT +Ġcompl icate +chen ko +ĠRecon struction +Ġpalp able +Ġa usp +Ġ4 28 +Ġshowc ases +ĠPublic ation +know ledge +inn on +4 19 +Ġretri eval +and ers +Ġref ute +Ġinqu ired +g ur +Ġneg ativity +Ġcons erve +Ġafter life +Ġpres upp +ĠGill espie +Ġm t +ĠD N +T ap +Ġper pend +ĠS my +does n +Ġsp illing +Ġhyp ers +K ate +® , +ke pt +ĠP owered +Ġj a +ĠK lux +ard e +ab an +Ġ4 44 +Ġflatt ened +ĠImprove ments +urg a +ĠK und +Ġins cribed +Ġfac ult +Ġunpre pared +ĠCons umers +Ġsatisf ies +Ġpul monary +Ġinf iltration +Ġex ternally +Ġcongrat ulations +ag han +Ġair liner +Ġfl ung +Ġfly ers +G D +Ġsnipp ets +Ġrec ursive +Ġmaster ing +L ex +Ġovert ly +v g +Ġluck ily +Ġenc ro +ĠLanc et +ĠAbyss al +function al +Ġs ow +Ġsqu id +Ġnar ration +Ġn aughty +ĠHon our +ĠSpart ans +Ġsh atter +ĠTac oma +ĠCal ories +ĠR aces +Sub mit +Ġpurpose fully +w av +ĠY ok +F est +ĠG err +Met ro +Ġit iner +f amous +Ġ" { +in line +was her +Iss ue +ĠCL IENT +oz o +Vers ions +7 25 +ĠGl ock +Ġshield ed +ĠPC R +ENC Y +ĠWe ld +ĠSim pl +Ġredirect ed +ĠK ham +Ġ( > +Ġlab ou +Ġdi apers +ss l +Ġcell ar +organ isms +ore sc +ĠBer ks +did n +Sh ipping +C hest +Ġund one +Ġmillion aire +Ġc ords +ĠYoung er +appropri ately +Ġsequ els +u ve +ant icipated +Ġle wd +ĠSh irt +ĠDmit ry +V eter +Ġsl aying +ĠY ar +Ġcompl ication +I owa +ĠEric a +ĠBL M +g irlfriend +b odied +6 26 +19 63 +Ġintermedi ary +Ġcons olation +M ask +ĠSi em +ow an +Beg inning +Ġfix me +Ġculmin ated +Ġcon duc +ĠVolunte er +Ġpos itional +Ġgre ets +ĠDefin itions +Ġthink er +Ġingen uity +Ġfresh men +ĠMom ents +Ġ35 7 +ate urs +ĠFed Ex +s g +69 4 +Ġdwind ling +ĠBO X +sel age +Ġt mp +Ġst en +ĠS ut +Ġneighbourhood s +Ġclass mate +f ledged +Ġleft ists +Ġclim ates +ATH ER +ĠScy the +ul iffe +Ġs ag +Ġho pped +ĠF t +ĠE ck +ĠC K +ĠDo omsday +k ids +Ġgas ped +Ġmon iker +ĠL od +ĠC FL +t ions +r ums +fol ios +Ġm d +Ġunc anny +Ġtrans ports +ĠLab rador +Ġrail ways +Ġappl iance +ĠCTR L +æ Ģ +Pop ulation +ĠConfeder acy +Ġunb earable +Ġdors al +ĠIn form +op ted +ĠK ILL +Mar x +Ġhypoc ritical +q us +ĠN umerous +ĠGeorg ian +ĠAmbro se +ĠL och +Ġgu bernatorial +ĠX eon +ĠSupp orts +ens er +ee ly +ĠAven ger +19 65 +Ar my +Ġju xtap +Ġcho pping 
+ĠSpl ash +ĠS ustainable +ĠFin ch +Ġ18 61 +ict ive +at meal +ĠG ohan +Ġlights aber +ĠG PA +ug u +ĠRE PL +vari able +Ġher pes +Ġdesert s +ac iously +Ġsitu ational +week ly +ob l +Ġtext ile +ĠCorn wall +Ġcontrace ptives +ĠA ke +] - +ä¹ ĭ +: , +ĠW em +ĠB ihar +Ġ' . +Ġbe re +Ġanal ogue +ĠCook ies +Ġtake off +Whe el +Ġmaj estic +Ġcomm uting +0 23 +ĠCor pse +ass ment +min i +Ġgor illa +ĠAl as +ere e +Ġacquaint ances +ĠAd vantage +Ġspirit ually +Ġey ed +pm wiki +ĠE nder +Ġtrans lucent +Ġnight time +ĠIM AGES +5 45 +ĠK amp +ĠFre ak +Ġ ig +Port land +4 32 +ĠM ata +Ġmar ines +Ġh ors +ater asu +ĠAtt ribution +Ġ-------- - +Ġk ins +ĠBEL OW +++ + +Ġre eling +ol ed +Ġcl utter +ĠRel ative +Ġ4 27 +B US +Ġa vert +ĠChe ong +ĠA ble +ĠPry or +Develop er +Ġen cyclopedia +ĠUSA F +ĠG arry +Sp ain +Bl ocks +Ġexp osition +ĠGamer Gate +W OR +Ġstockp ile +Ġclot hed +ĠT one +ĠR ue +t umblr +Ġtreacher ous +Ġf rying +Ñ Į +ĠS ph +Ġrest raints +Ġemb odies +ĠG es +S afety +Ġnegoti ators +min ing +ĠAppalach ian +L OS +ĠJenn a +Ġpass ers +ç ĭ +sn ap +Ġshort en +creat or +Ġinn umerable +uther land +67 4 +ĠW OM +ĠAs cend +ĠArm ory +ĠTrans action +K ick +Ġsuit case +day Name +Ġwaste ful +mar riage +ĠMcC abe +ite ch +ĠO ss +Cl osure +ĠTreasure r +Ġindec ent +ĠD ull +Ġresid ences +19 59 +ĠS ettlement +Ham ilton +Ġself ies +ĠRank ing +ĠBark ley +ĠB ore +ĠW CS +ĠMar itime +ĠH uh +ĠForest ry +Ġcultiv ating +ĠBall ard +Ġg arrison +ĠSD L +9 30 +Ġnas cent +Ġirresist ible +Ġaw fully +\/ \/ +Ġequ ate +Ġanthrop ology +ĠSylv ia +Ġintest ine +Ġinnoc uous +cess ive +ag ra +ĠMet roid +G rant +8 55 +ģ ĸ +Ġ" _ +ãĥĥ ãĥī +Ġappra isal +ĠFred dy +04 6 +Ġ40 6 +Ġ18 30 +Ġd ocking +St atic +Ġp ont +ĠVolt age +ĠSt ead +ĠMort gage +ĠJon ah +Y L +CLASS IFIED +Ġas bestos +nik ov +Ġcoll agen +ĠOrb ital +P ocket +7 99 +Ġhy brids +inc hes +Ġinv oice +und y +Ġinequ alities +T rend +w ashed +B ALL +Ġluc id +ĠComment ary +Ġw itty +Br andon +Ġbru ising +Ġ6 20 +es cent +box ing +P OL +Ġ3 78 +R ect +Ġlic ences +ĠMcG ee +p ressed +D anny +Ġj ammed +ord inate +Ġle th +Ġdistingu ishes +ĠYam aha +IL S +ĠH ume +ĠC ategories +Rober ts +Ch art +Ġbeet le +ĠGra veyard +Ġ($ ) +o ÄŁ +Ġtw ilight +are lla +á ½ +Ġbooth s +ĠH HS +ĠFeld man +Ġexcav ation +Ġphilosoph ies +at ography +ĠGar age +te chnology +Ġunfor gettable +Ġver ifying +Ġsubord inates +E ls +Ġne b +G aming +EN A +ĠAchieve ment +it ters +ĠG abe +Ġd umps +for cer +Ġpo ignant +ĠM BA +ĠHe idi +ime i +Ġm ages +Ġliber ate +Ġcircum cised +ĠMer maid +ĠMat th +t ogether +ĠW ichita +Ġstore front +ĠAd in +V II +Four th +Ġexplore rs +W ER +Not able +Bro ok +m ens +F aith +-------- - +ĠJ ou +¬ ¼ +Ġpine apple +Ġam alg +el n +ark able +ĠãĤµ ãĥ¼ãĥĨãĤ£ +ĠãĤµãĥ¼ãĥĨãĤ£ ãĥ¯ãĥ³ +Ġov arian +ĠE choes +Ġhairc ut +Ġp av +Ġch illed +anas ia +Ġsty led +Ġd ab +ni per +Ġminister ial +ĠD UP +T an +Ġsul ph +ĠD eter +ĠBo hem +od an +Ġeduc ator +â ĵĺ +sp ir +Ch icken +ĠE leanor +Ġqu i +Ġheav iest +Ġgrasp ed +U RA +Ġcro oked +Jess ica +pro blem +Ġpred etermined +Ġman iac +Ġbreath s +ĠLauder dale +Ġh obbies +y z +Cr ime +Ġcharism a +d L +Ġle aping +Ġk ittens +Ang elo +ĠJ ACK +ĠSu zanne +Ġhal ting +ENT ION +Ġswall owing +ĠEarthqu ake +Ġeight eenth +ĠN IC +ĠIN F +ĠCons cious +Ġparticular s +circ le +7 40 +Ġbene volent +Ġ7 47 +Ġ4 90 +Ġr undown +ĠVal erie +ĠB UR +Ġcivil isation +ĠS chn +W B +ot ide +intern ational +Ġj ohn +Ġ19 02 +Ġpe anuts +Ġflav ored +k us +Ġro ared +Ġcut off +é £ +Ġorn ament +Ġarchitect ures +Ġ3 69 +ol or +ĠWild e +ĠC RC +ĠAdjust ed +Ġprov oking +land ish +Ġrational ity +Ġjust ifies +Ġdisp el +Ġa meric +ĠPol es +Ø © +Ġen vis +ĠD oodle 
+ä½ ¿ +igs aw +auld ron +Techn ical +T een +up hem +ĠX iang +Ġdetract ors +ĠZ i +ĠJournal ists +Ġconduc ive +ĠVolunte ers +Ġs d +Know ing +Ġtrans missions +ĠPL AN +ĠL IB +Ġall uded +Ġob e +Ġd ope +ĠGold stein +Ġwavelength s +ĠDest ination +nd a +ug i +Ġattent ive +ĠLe an +ral tar +Ġman g +mb uds +ak ings +b ender +Ġacc ol +Ġcraw led +N OW +Min nesota +Ġflour ished +ĠZ up +ĠSuper visor +ĠOliv ier +Ex cellent +Ġwid en +D one +Ġw ig +Ġmiscon ceptions +Cor p +W an +Ġvener able +ĠNot ably +ĠKling on +an imate +Bo ost +ĠS AY +miss ing +ibli ography +mel on +Ġpay day +Ø ³ +bo le +Ġve iled +ĠAl phabet +It alian +Ġever lasting +ĠR IS +ĠC ree +rom pt +Ġh ating +Ġgrin ning +Ġge ographically +OS H +Ġwe eping +ĠÂłĠÂłĠÂłĠÂł ĠÂłĠÂłĠÂłĠÂł +Ġimpe cc +Let ter +Ġblo ated +PL A +ĠFe in +Ġper sever +Th under +Ġa ur +ĠR L +Ġpit falls +âĸ º +Ġpredomin ant +Ġ5 25 +7 18 +AP E +7 14 +Ġfarm land +ĠQ iao +Ġv iolet +ĠBah amas +Ġinflic ting +ĠE fficiency +Ġhome brew +Ġundert ook +Ġcur ly +ĠHard ing +man ia +59 6 +Ġtem pered +Ġhar rowing +ĠP ledge +ĠFranken stein +è ª +M otion +Ġpredict ably +ĠExpl osion +oc using +er d +col o +FF ER +Ġback field +ĠV IDE +ue bl +N arr +ĠArg ument +Ġgen omic +Ġbout ique +Ġbatt ed +ĠB inary +Ġg amb +ĠRh ythm +67 3 +Ġa float +ĠOlymp ia +Y ING +Ġend if +is in +Ġwin ters +Ġsc attering +I v +D istance +Ġtr u +ĠCom fort +Ġne xus +Ġair flow +ĠByz antine +p ayers +con i +ĠB etsy +D eal +ĠN ug +ĠContin ent +red ibly +Ġoptim izing +al beit +Ġec static +ĠPro to +ç · +iv ot +âĸ Ħ +em p +rou nder +Ġcl out +ĠI ST +66 3 +ĠDoll ars +ĠD AC +Ġsubsc ribed +Ġrehears al +Ġam ps +ĠSh ang +es m +Ġspr inkle +Ġassail ant +ĠO o +ĠCoin base +T act +Ġret ina +Ġn uns +R ON +att o +Ġj ug +ĠSV G +Ġb ikini +ĠFI LE +ĠFound ers +ep ort +ĠK P +Ġrest ores +ĠTh ick +Ġash ore +Ġappro vals +R ender +M AG +G raham +ĠCort ana +ãĥ³ ãĤ¸ +ss h +or ians +ars ity +ĠInsp ired +u pper +Ġsign alling +Ġreb uke +Ġfl ares +Ġdownt ime +Stud ies +Ġstagn ation +ĠSequ ence +Ġgr unt +Ġass ures +ĠPL A +59 2 +Ġintra ven +d epend +Sus an +ĠManz iel +Man ia +Cont ract +Ġsl ams +Ġcult ured +Ġcred itor +L IST +ĠH UM +ĠChatt anooga +serv ed +Ġclo aked +ĠF TP +p owder +ĠSt ella +uct ive +Ġcheap ly +ĠMU CH +ĠGalile o +Ġsu ites +spe ech +Ġdeliber ations +ĠCh ips +« ĺ +Bal ance +ĠWyn ne +ĠAk ron +Ass et +Ġhon oured +Ġed ged +Like wise +anim ous +ĠW age +ĠEz ek +ad vertisement +ĠRT X +ĠM AD +Ġmigr ating +ĠS QU +Ġ4 75 +Ed ited +Ġshorth and +ĠBas ics +Ġcro tch +ĠEV EN +Ġv m +effic iency +Ġcal ves +ĠF rie +ĠBrill iant +Ġstri kers +Ġrepent ance +Ġarter ies +r l +B ed +h ap +Ġcrypt ography +ĠSab res +Ġ4 14 +vi ks +ih ara +aps es +T alking +Ġintertw ined +Ġdoc ks +Ġalle le +ĠArt ifact +ĠH IM +t orn +ç ķ +Ġop acity +ĠE ly +os uke +Ġn ipple +Ġhand written +ĠV K +ĠChamber lain +ĠLa os +ig raph +g row +Ġtr illions +Ġdescend ant +ĠSail or +as uring +Ġce ilings +ĠWare house +f lying +ĠGl ow +Ġn ont +Ġmiscar riage +Ġrig s +Ġmin istries +Ġelabor ated +Ġdel usional +ĠHum ane +Ġ3 79 +n ets +Ġblack out +add ers +Ġn p +ĠT ire +ro sc +Ġsub div +Ġlink age +Ġchron ological +ĠHER O +Ġres ettlement +ĠVin yl +Ġpast oral +ĠMob il +ĠBar bar +Co oldown +ĠF ritz +c riminal +re pe +Ġbell ig +ĠBre ed +Ġ4 18 +Ġsem blance +ij k +Ġcur tail +Ġclin ch +cont ained +ĠProm pt +ast on +Ġw i +Ġpursu its +5 15 +ĠGl oss +Ġfl ips +Ġcoup ons +Ġcl oning +ĠLike ly +Rem oved +ĠQu artz +r ices +ĠSpe ars +Ġp ious +Ġdep reciation +ĠD are +oun ces +am az +O nt +Ġp innacle +d ocker +0 26 +ĠW yr +ĠPro per +Ë Ī +n il +By tes +Ġseek er +t rial +Ġunf olds +ĠMar se +Ġextravag ant +ĠSurviv ors +RED ACTED +ĠSpeed 
way +ĠCra igslist +sub mit +ĠGener ations +Ġup holding +Ġblood stream +ĠMiss ions +ĠL awn +Ġlim bo +ene i +H uh +ĠWild cats +pre p +ĠMark us +ĠFor bidden +rit ic +IN O +Ġexhib iting +requ ent +ch uk +Ġhabit ual +ĠComp atibility +Dr ag +RIP T +uj ah +GR OUND +Ġdelinqu ent +Ġburn er +Ġcontempor aries +Ġgimm ick +load s +Ġno zzle +p odcast +ĠW ak +ĠStat en +ĠK uh +ãģ ĵ +inter rupted +Ġinv incible +ĠBurn ett +cig arette +ĠPeb ble +ĠTem porary +ĠMar ino +58 2 +Ġwast eland +ident ly +T x +Ġr ite +ĠPan asonic +ĠM iddles +ĠHort on +ae us +Ġc uring +Ġm ats +Ġadj ourn +Ġfears ome +pe z +bo ats +Ġpro pell +Ġconflic ted +ĠAng er +Ġinsurg ent +K arl +Ġco ales +Ġsouth western +Ġdis su +ĠO vert +******** **** +Ġbox ed +ĠBr une +aa a +Ġgard ening +ĠEng el +tr acks +Ġpur ified +Ġplace holder +ĠL ikes +Ġd an +G ab +Ġe ct +ĠF aw +ĠEl iot +Ġ' , +otrop ic +ĠRu in +hed on +Ġca ul +Ġa ft +ĠCad illac +gh a +ass ian +ud eb +ĠT ick +Ġadjust s +AR GET +5 37 +isc he +ant y +ĠFried rich +ĠBl izz +ĠA OL +Camp aign +Ġmamm al +ĠVe il +ĠK ev +ĠMaur it +ĠDam ien +N ation +E astern +Ġ{ : +Ġ= ================================ +Ġstereotyp ical +Ġatt ic +ĠCy borg +requ ire +Ġaward ing +ĠPap ua +bt n +b ent +B oo +Ġ( = +ĠX ander +ĠSomers et +Ġcatch y +Ġcert ify +STR UCT +Ġit al +Ġt ides +ĠBr ands +G ray +comp etitive +Ġcur ator +ĠD G +omin ium +ĠGM Os +ci ating +ĠCarm en +ow ard +Balt imore +Ġr gb +C u +Ġwip es +spe ll +IT NESS +Ġsummar izes +ĠRe vis +Ġwhistlebl owers +ĠBre ach +Ġcro chet +k os +ews ki +Ġrep et +Ġcrim son +ĠKar achi +read able +dim ension +ĠI gor +ild ed +ĠZ ed +ĠKe ane +ĠCos metic +DE P +Ġretreat ing +ĠU A +ens ical +Ġd usk +ĠDick ens +Ġaren as +ĠPass age +level s +Ġcur v +P ope +Ġch ores +ĠEl ise +ĠComp ass +b ub +Ġmamm alian +ĠSans krit +ĠAN C +ĠCr ack +Q ual +L aun +amp unk +Ġlearn ers +Ġglam orous +Ġfur the +erm ott +c and +Gener ic +Ġnarr ated +Ġdisorder ly +ĠTrans actions +ĠDet ention +ĠR oku +Ä į +Ġunder statement +ĠS aur +ĠRodrig o +ĠAS AP +S in +Ġre joice +Method s +Ġelectro de +Ġworsh ipped +Ġid i +ĠPhys icians +Ġpop up +Ġde ft +ĠRem oval +ĠBu enos +ver bs +Ġfun k +ush a +rict ion +ore a +ĠBang alore +ĠKen obi +zz i +Ġnorm ative +Ġgobl ins +Ġcaf es +ĠUN CLASSIFIED +ĠF ired +S IGN +Ġs clerosis +ĠV oter +ĠSon ny +ĠExt end +ĠEV s +Ar senal +Ġp si +Ġwid est +ĠT us +Ġlo oms +Ġjust ifying +ĠGr anger +è ¯ +Ref er +58 3 +Ġflour ishing +ab re +Ġr ave +ĠCont ra +Ġ18 98 +Add s +Ġf ul +ĠCo oke +some one += # +67 1 +Ġy ak +Ġar te +ĠMis cellaneous +ĠDet ection +ĠCl ancy +â ģ +ass ies +Ġval iant +ĠFemin ist +cor ruption +V el +P ear +Ġsucc inct +Ġquick est +k w +Ġsp itting +ĠL ibraries +åħ ī +ant z +D ad +ĠSpec ifications +rup ulous +and r +RES ULTS +Ġsnow ball +Ġpred is +ĠB axter +ĠNurs ing +ĠCh aff +s we +Ġout age +Ġnest ing +Ġnotor iety +tr igger +on ite +j on +Ġf ou +ook ed +ĠCelebr ity +re ality +Ġfat ig +Ġhug ging +Ġbother s +ĠPan zer +ĠCh andra +fig ured +Ġvol ts +ĠCloud s +Ġfee ble +ĠCur ve +ĠAs us +78 6 +abs or +ĠV ICE +ĠH ess +Ġmanufact ures +Ġgri zz +ĠPower ful +ac id +Ġsub sections +ĠKrug man +ĠAl ps +is u +Ġsequ est +ĠUlt ron +ĠT inker +ĠGo ose +Ġmism atch +Att orney +Ġmorph ology +ĠSix ers +ut tered +ĠE LECT +gr an +Rus sell +ĠG SL +Ġfort night +Ġ. 
) +Ġapost le +pr one +el ist +Unt itled +ĠIm plementation +ist ors +Ġtank er +Ġpl ush +Ġattend ants +ĠT ik +ĠGreen wich +ĠY on +ĠSP L +cell s +unt led +S olution +ĠQu é +Ġvac ated +Ġupt ick +ĠMer idian +æ ĥ +ĠDr ill +9 25 +58 4 +Ġrenov ated +ĠKub rick +zy k +Ġl ousy +pp el +ohyd rate +ĠI zzy +lesi astical +CC C +ĠAj ax +Ġad apters +ĠPetra eus +Ġaffirm ation +ĠST OR +le ms +ad oes +ĠConstantin ople +Ġp onies +Ġl ighthouse +Ġadherent s +ĠBre es +omorph ic +Fight ing +Ġpl aster +ĠP VC +ĠOb st +Ġdear ly +ĠTo oth +icks on +Ġsh aming +P lex +A gg +ĠâĢ¦ " +Ġsub reddits +Ġpige on +ĠResident ial +ĠPass ing +Ġl um +ĠP ension +Ġpessim istic +Ġ4 32 +z inski +c ade +0 75 +Ġapolog ised +iy ah +Put ting +Ġgloom y +ĠLy me +=-=-=-=- =-=-=-=- +ĠT ome +ĠPsych iatric +ĠH IT +c ms +ap olog +Ġbreak er +Ġdeep en +Ġtheor ist +ĠHigh lands +Ġb aker +Ġst aples +Ġinterf ered +ĠAb ortion +jo ined +ch u +Ġform ulate +Ġvacc inations +Ġban ter +phe us +Ġoutfield er +ĠM eter +Ġ# #### +Ġ18 95 +Ġnarrow ing +ĠST ORY +f p +ĠC ST +ign ore +Ġproclaim ing +ĠR U +ĠB ALL +yn a +65 3 +Ġpos it +P RE +59 4 +ĠRegist rar +ĠPil grim +ic io +Ġpre tt +Ġlif eless +Ġ__ _ +Ne igh +ĠCh urches +orn o +Ġor cs +Ġkind red +ĠAud it +Ġmillenn ial +ĠPers ia +g ravity +ĠDis ability +ĠD ARK +W s +od on +Ġgrand daughter +ĠBro oke +ĠA DA +ER A +Ġpick ups +ĠWil kinson +ĠSh ards +ĠN K +Ġexp el +ĠKis lyak +Ġj argon +Ġpolar ized +ian e +Pub lisher +Ġreb utt +Ġapprehens ion +ĠK essler +Ġpr ism +F UL +19 64 +ĠL oll +ä ¿ +le thal +Å Ł +Ġg hetto +Ġb oulder +ĠSlow ly +ĠOsc ars +ĠInst ruction +ĠUl tr +ĠM oe +N ich +ĠP ATH +( * +ĠRE LEASE +un ing +rou se +en eg +Ġre imb +ĠDet ected +Do S +Ġster ling +Ġaggreg ation +ĠLone ly +ĠAtt end +hig her +Ġairst rike +ks on +SE LECT +Ġdef lation +ĠHer rera +C ole +rit ch +Ġadvis able +F ax +Ġwork around +Ġp id +mort em +ers en +Ġtyp o +Ġal um +78 2 +ĠJam al +script s +Ġcapt ives +ĠPres ence +ĠLie berman +angel o +Ġalcohol ism +ass i +Ġrec ite +Ġgap ing +Ġbask ets +ĠG ou +Brow ser +ne au +Ġcorrect ive +und a +sc oring +ĠX D +Ġfil ament +Ġdeep ening +ĠStain less +Int eger +Ġbu ggy +Ġten ancy +ĠMub arak +Ġt uple +ĠD roid +ĠS itting +Ġforfe it +ĠRasm ussen +ixt ies +es i +ĠKim mel +Ġmetic ulously +Ġap opt +ĠS eller +08 8 +ec ake +hem atically +T N +Ġmind less +Ġdig s +ĠAcc ord +ons ense +em ing +br ace +Ġe Book +ĠDist ribut +ĠInvest ments +w t +] ), +beh avior +56 3 +Ġbl inding +ĠPro testers +top ia +Ġreb orn +ĠKel vin +ĠDo ver +ĠD airy +ĠOut s +Ġ[ / +Ï Ģ +b p +ĠVan ity +ĠRec ap +ĠHOU SE +ĠF ACE +Ġ4 22 +69 2 +ĠAnt ioch +cook ed +Ġcoll ide +Ġa pr +Ġsle eper +ĠJar vis +Ġalternative ly +ĠLe aves +ĠM aw +Ġantiqu ity +ĠAdin ida +Ġab user +Poké mon +Ġass orted +ĠRev ision +ĠP iano +ĠG ideon +O cean +Ġsal on +Ġbust ling +ogn itive +ĠRah man +Ġwa iter +Ġpres ets +ĠO sh +ĠG HC +oper ator +Ġrept iles +Ġ4 13 +ĠG arr +ĠCh ak +Ġhas hes +Ġfail ings +Ġfolk lore +Ġab l +ĠC ena +ĠMac Arthur +ĠCOUR T +Ġperipher y +app ers +Ġreck oned +ĠInf lu +ĠC ET +Ġ3 72 +ĠDefin itive +ass ault +4 21 +Ġreservoir s +Ġd ives +ĠCo il +DA Q +Ġvivid ly +ĠR J +ĠBel lev +Ġec lectic +ĠShow down +ĠK M +ip ed +reet ings +ĠAs uka +L iberal +ĠÏ Ħ +Ġbystand ers +ĠGood win +uk ong +S it +ĠT rem +Ġcrim inally +ĠCirc us +ch rome +88 7 +Ġnan op +ĠOb i +ĠL OW +o gh +ĠAuth ors +ob yl +Ur ban +Ġt i +ĠWe ir +t rap +ag y +Ġparent heses +Ġout numbered +Ġcounter productive +ĠTob ias +ub is +P arser +ST AR +Ġsyn aptic +ĠG ears +Ġh iber +Ġdebunk ed +Ġex alted +aw atts +H OU +Ch urch +ĠPix ie +ĠU ri +ĠForm ation +ĠPred iction +C EO +Ġthro tt +ĠBrit ann +ĠMad agascar +ë ĭ +Ġbill boards 
+ĠRPG s +ĠBe es +complete ly +F IL +Ġdoes nt +ĠGreen berg +re ys +Ġsl ing +Ġempt ied +ĠPix ar +ĠDh arma +l uck +ingu ished +Ġend ot +Ġbab ys +05 9 +che st +r ats +Ġr idden +Ġbeet les +Ġillum inating +Ġfict itious +ĠProv incial +Ġ7 68 +Ġshe pherd +ĠR ender +Ġ18 96 +C rew +Ġmold ed +ĠXia omi +ĠSp iral +Ġdel im +Ġorgan ising +Ġho ops +ĠBe i +z hen +Ġfuck in +Ġdec ad +Ġun biased +am my +sw ing +Ġsmugg led +Ġk ios +ĠP ERSON +ĠInquis itor +Ġsnow y +Ġscrap ing +ĠBurg ess +P tr +ag ame +R W +Ġdro id +ĠL ys +ĠCass andra +Jac ob +Ġ35 4 +Ġpast ure +Ġfr anc +ĠScot ch +ĠEnd s +ĠI GF +def inition +Ġhyster ical +ĠBrown e +77 1 +Ġmobil ization +æ ķ +iqu eness +Th or +Ġspear headed +Ġembro iled +Ġconject ure +jud icial +Ch oice +Ġpaper back +P ir +Ġrec overs +ĠSur ge +ĠSh ogun +ĠPed iatrics +ãģ ł +Ġsweep s +ĠLabor atories +ĠP acks +al us +add in +Ġhead lights +g ra +Ev idence +COL OR +Ad min +Ĭ ± +Ġconco ct +s ufficient +Ġun marked +Ġrich ness +Ġdiss ertation +Ġseason ing +Ġg ib +ĠM ages +un ctions +ĠN id +che at +ĠTM Z +c itizens +ĠCatholic ism +n b +Ġdisemb ark +ĠPROG RAM +a ques +Ty ler +Or g +ĠSl ay +ĠN ero +ĠTown send +IN TON +te le +Ġmes mer +9 01 +Ġfire ball +ev idence +aff iliated +ĠFrench man +ĠAugust a +0 21 +Ġs led +Ġre used +ĠImmun ity +Ġwrest le +assemb led +Mar ia +Ġgun shots +ĠBarb ie +Ġcannabin oids +ĠTo ast +ĠK inder +IR D +Ġre juven +Ġg ore +Ġrupt ure +Ġbre aching +ĠCart oon +Ġ4 55 +ĠPale o +6 14 +Ġspe ars +ĠAm es +ab us +Mad ison +GR OUP +Ġab orted +y ah +Ġfel on +Ġcaus ation +Ġprep aid +Ġp itted +op lan +ĠShel ley +ĠRus so +ĠP agan +Ġwill fully +ĠCan aver +und rum +ĠSal ary +ĠAr paio +read er +ĠR ational +ĠOver se +ĠCa uses +Ġ* . +Ġw ob +Ke ith +ĠCons ent +man ac +77 3 +6 23 +Ġfate ful +et imes +Ġspir ited +ĠD ys +Ġhe gemony +Ġboy cot +ĠEn rique +em outh +Ġtim elines +ĠSah ara +ĠRel ax +ĠQuin cy +ĠLess ons +ĠE QU +SE A +N K +ĠCost co +Incre ase +Ġmotiv ating +ĠCh ong +am aru +ĠDiv ide +Ġped igree +ĠTasman ia +ĠPrel ude +L as +9 40 +57 4 +Ġch au +ĠSp iegel +un ic +-- > +ĠPhil ips +ĠKaf ka +Ġuphe aval +Ġsent imental +Ġsa x +ĠAk ira +ser ial +Mat rix +Ġelect ing +Ġcomment er +ĠNeb ula +ple ts +ĠNad u +ĠAd ren +Ġen shr +ĠR AND +fin ancial +ĠCly de +uther ford +Ġsign age +Ġde line +Ġphosph ate +rovers ial +f ascist +ĠV all +ĠBeth lehem +Ġfor s +Ġeng lish +S olid +N ature +Ġv a +ĠGu ests +Ġtant al +Ġauto immune +;;;;;;;; ;;;; +ĠTot ally +ĠO v +Ġdef ences +ĠCoc onut +Ġtranqu il +Ġpl oy +Ġflav ours +ĠFl ask +ãĤ¨ ãĥ« +ĠWest on +ĠVol vo +8 70 +Ġmicro phones +ver bal +R PG +Ġi ii +; } +0 28 +Ġhead lined +Ġprim ed +Ġho ard +ĠSh ad +ĠEN TER +Ġtri angular +Ġcap it +l ik +ĠAn cients +Ġl ash +Ġconv ol +Ġcolon el +en emy +G ra +Ġpub s +ut ters +Ġassign s +ĠPen et +ĠMon strous +ĠBow en +il ver +H aunted +ĠD ing +start ed +pl in +Ġcontamin ants +ĠDO E +ff en +ĠTechn ician +R y +Ġrob bers +Ġhot line +ĠGuard iola +ĠKau fman +row er +ĠDres den +ĠAl pine +E lf +Ġf mt +ĠS ard +urs es +g pu +Un ix +Ġunequiv ocally +ĠCitizens hip +qu ad +m ire +ĠS weeney +B attery +6 15 +Ġpanc akes +Ġo ats +M aps +ĠCont rast +mbuds man +ĠE PS +Ġsub committee +Ġsour cing +Ġs izing +ĠBuff er +ĠMand atory +Ġmoder ates +ĠPattern s +ĠCh ocobo +ĠZ an +ĠSTAT ES +ĠJud ging +ĠIn her +* : +Ġb il +ĠY en +Ġexh ilar +oll ower +z ers +Ġsn ug +max imum +Ġdesp icable +ĠP ACK +ĠAn nex +Ġsarcast ic +Ġlate x +Ġt amp +ĠS ao +b ah +ĠRe verend +ĠChin atown +ĠA UT +d ocumented +ĠGA BA +ĠCan aan +ĠÙ ħ +Ġgovern s +pre v +E sc +ĠEst imates +OS P +Ġendeav our +ĠCl osing +omet ime +every one +Ġwor sen +Ġsc anners +Ġdev iations +ĠRobot ics +ĠCom pton 
+Ġsorce rer +Ġend ogenous +Ġem ulation +ĠPier cing +ĠA ph +ĠS ocket +Ġb ould +ĠO U +ĠBorder lands +Ġ18 63 +G ordon +ĠW TO +Ġrestrict s +Ġmosa ic +Ġmel odies +ç Ħ +T ar +Ġdis son +ĠProv ides +Ġ ...... +b ek +F IX +Ġbro om +ans hip +Do ctors +Ġner ds +ĠReg ions +na issance +Ġmet e +Ġcre pt +pl ings +Ġgirlfriend s +kn it +ig ent +ow e +Ġus hered +ĠB az +M obil +4 34 +ĠPres ents +orig in +Ġins omnia +ĠA ux +4 39 +ĠCh ili +irs ch +G AME +Ġgest ation +alg ia +rom ising +$ , +c row +ĠIn spection +at omic +Rel ations +J OHN +rom an +ĠClock work +ĠBak r +m one +M ET +Ġthirst y +Ġb c +Ġfacult ies +R um +Ġnu ance +ĠD arius +ple ting +fter s +etch up +Reg istration +ĠK E +R ah +Ġpref erential +ĠL ash +ĠH H +Val id +ĠN AV +Ġstar ve +ĠG ong +z ynski +ĠAct ress +Ġw ik +Ġun accompanied +lv l +Br ide +AD S +ĠCommand o +ĠVaugh n +Wal let +Ġho pping +ĠV ie +Ġcave ats +Ġal as +if led +ab use +66 1 +Ġib n +Ġg ul +Ġrob bing +t il +IL A +Ġmit igating +Ġapt ly +Ġty rant +Ġmid day +ĠGil more +ĠDe cker +Ġ§ § +part ial +Ex actly +Ġphen otype +Ġ[+ ] +ĠP lex +ĠI ps +vers ions +Ġe book +Ġch ic +g ross +":" "},{" +ĠSur prisingly +M organ +Ġresid ues +ĠConf ederation +in feld +Ġl yr +mod erate +Ġperpend icular +V K +Ġsynchron ized +Ġrefres hed +Ġad ore +ĠTor ment +ol ina +Ġ26 00 +Item Tracker +Ġp ies +ĠF AT +ĠR HP +0 48 +ĠRES P +ĠB J +all ows +P and +Ġunw elcome +ĠV oc +ĠBast ard +ĠO W +ĠL AR +ĠHeal er +Environment al +ĠKen yan +ĠTr ance +ĠP ats +Ġali ases +ĠGar field +Ġcampaign er +Ġadvance ments +ĠOkin awa +ĠC oh +ows ky +Ġstar ved +Ġsize able +Ġ: -) +Ġm RNA +Ġsusp ensions +ist ar +Scot land +Pr in +-------------------------------- ---------------- +Ġ50 2 +Ġteasp oons +Ġ10 50 +Ġcoerc ive +ĠMason ic +edd ed +ĠPass enger +Ġl att +Ġbr aces +ĠSt eal +ĠNY T +ĠK ats +ĠCel est +ae z +T u +ĠCoul ter +ðŁ ĺ +Fl ickr +ĠWil mington +ith s +++ ; +Ġv ending +Ġneg ro +ĠPh i +ĠYellow stone +Call back +Ġsh ampoo +ĠSh ades +w at +Ġsuper human +Ġridic uled +Ġhol iest +om bo +Ġintern s +Ġh one +ĠPar agu +UR I +Ġd angling +ãĤ » +so v +ict ional +av ailability +Ġrev ocation +Ġd ow +in ic +ĠTHE IR +Ġis o +Ġout ings +ĠLeth al +Ġ) )) +Ġinacc ur +Ġout landish +Ġan us +let ico +id on +l ol +Ġun regulated +Ġsuccumb ed +Ġc uff +ĠWast eland +let al +Ġsub str +Ġcoff ers +Ġautom akers +ov i +ĠX ue +ĠDayton a +Ġjar ring +Ġf umes +Ġdisband ed +z ik +itt on +Ġstriking ly +Ġsp ores +Ad apter +.) 
: +ĠLynd on +ival ry +Ġor ally +Ġtumult uous +Ġdisple asure +Ġcon es +or rect +Ġappe ase +Ġder by +ĠTrip oli +ĠAl ess +Ġp oked +ĠGu ilty +v P +En ough +Ġorig inals +6 99 +Ġrabb i +Ġproverb ial +Ġpostp one +el ope +ĠMist y +Ġstaff ed +ĠUn employment +redit ary +Ġdilig ent +re comm +me asures +as in +8 25 +Ġpond s +Ġmm ol +ĠS AR +ĠC ARE +Ġ3 71 +Ġclen ched +ĠCors air +Ġcaric ature +z n +att ach +ĠSch ro +spe ak +p ainted +ĠS uc +ĠE NT +Ġcell ul +ĠP aid +di agn +WH ERE +Ġtext ed +B arn +Ġret racted +ĠRe ferred +S av +Ġup keep +Ġwork places +ĠTok ens +Ġampl ify +cl inical +Ġmult ic +mber g +Ġconvol uted +Reg ion +5 65 +ĠTop ic +Ġsn ail +Ġsal ine +Ġins urrection +ĠPet r +f orts +B AT +ĠNav ajo +Ġrud imentary +ĠLak sh +OND ON +Me asure +Ġtransform er +ĠGodd ard +Ġcoinc ides +ir in +R ex +ĠB ok +qu it +Ġshotgun s +Ġprolet arian +Ġsc orp +ĠAd a +5 14 +Ġsl ander +record ed +Ġemb ell +ris ome +Ġapolog izing +ĠMul cair +ĠGib raltar +Cl a +Ġall ot +ĠAtt ention +Ġ4 33 +le ave +Ġwh ine +ĠIss a +ĠFa ust +ĠBar ron +hen y +Ġvictim ized +J ews +Ġnurt uring +ett el +W inged +ĠSub tle +Ġflavor ful +ĠRep s +eng ed +call back +Ġdirection al +Ġcl asp +ĠDirect ions +plan et +icult ure +Hel per +ic ion +ac ia +Ġç ¥ŀ +Ġsur ges +Ġcan oe +ĠPrem iership +be en +Ġdef ied +ĠTro oper +Ġtrip od +Ġgas p +ĠE uph +ĠAd s +vern ight +high ly +R ole +Ġent angled +ĠZe it +6 18 +ĠRust y +Ġhaven s +ĠVaugh an +HA EL +ĠSER VICE +/ , +Ġstr icken +Ġdel usions +Ġb is +ĠH af +Ġgrat ification +Ġent icing +UN CH +Ad ams +ĠOL ED +ĠBeet le +Ġ18 99 +ĠSO FTWARE +ateg or +V L +ĠTot em +ĠG ators +AT URES +Ġimped ance +Reg istered +ĠC ary +ĠAer ial +on ne +en ium +Ġd red +ĠBe g +Ġconcurrent ly +Ġsuper power +ĠX an +j ew +imes ter +ĠDick inson +âĶ ģ +F la +Ġp ree +ĠRoll ins +© ¶æ +Ġden omination +ĠL ana +5 16 +Ġinc iting +sc ribed +j uries +ĠWond ers +app roximately +Ġsusp ending +Ġmountain ous +ĠL augh +oid al +N s +Det ect +) = +ĠL uthor +ĠSchwarz enegger +ĠMull er +ĠDev i +ec ycle +J ar +6 13 +ĠL ongh +B ah +ĠSP ORTS +n w +Ġref inement +Ġwater ways +Ġd iner +Bl ade +68 3 +F ac +Ġinitial s +Ġro g +Ġparan ormal +B UT +Ġ[ ( +ĠSw anson +ĠM esh +âĸ ¬ +Impro ve +ĠRad iation +ĠEst her +ĠE sk +ĠA ly +ik y +Ġir rad +ĠBuck ingham +Ġref ill +Ġ. 
_ +Re pe +CON CLUS +Ġdifferent iated +Ġchi rop +ĠAt kins +Pat tern +Ġexc ise +Ġcab al +N SA +ĠST A +ĠS IL +ĠPar aly +Ġr ye +ĠHow ell +ĠCount down +ness es +alys ed +Ġres ize +ãĤ ½ +Ġbudget ary +ĠStr as +w ang +Ġap iece +Ġprecinct s +Ġpe ach +Ġsky line +Ġ35 3 +pop ular +App earances +ĠMechan ics +ĠDev Online +S ullivan +Z en +Ġp u +op olis +5 44 +Ġde form +Ġcounter act +ĠL ange +Ġ4 17 +Con sole +77 4 +Ġnodd ing +Ġpopul ism +Ġhe p +Ġcoun selling +compl iance +U FF +Ġunden iably +Ġrail ing +ĠHor owitz +ĠSim one +ĠBung ie +Ġa k +ĠTal ks +x ff +fl ake +Cr ash +Ġsweat y +Ġban quet +ĠOFF IC +Ġinvent ive +Ġastron omer +ĠStam ford +ĠSc are +ĠGRE EN +olic ited +Ġr usher +Ġcent rist +ight ing +Ġsub class +Ġdis av +Ġdef und +ĠN anto +oci ate +m ast +Ġpac if +Ġm end +e ers +imm igration +ESS ION +Ġnumber ing +Ġlaugh able +ĠEnd ed +v iation +em ark +P itt +Ġmetic ulous +ĠL F +Ġcongrat ulated +ĠBir ch +Ġsway ed +Ġsemif inals +Ġhum ankind +m atter +ĠEqu ip +opa usal +S aid +ĠLay out +Ġvo icing +Ġth ug +Ġporn ographic +I PS +Ġmo aning +Ġgriev ance +Ġconf essions +esc al +TEXT URE +Aut hent +os aurus +P urchase +Ġreleg ation +al ter +ĠÂł Âł +Ġr iddled +Ġo gre +ĠLow ell +Occ up +E at +ĠHy der +ĠAdvis er +Com merce +H unt +ĠOr th +ĠComp etitive +ĠCL A +CD C +Ġsal ads +F le +Ġindustrial ized +` , +ĠO WN +Ġbec k +ĠPart icularly +oub t +Ġm M +ĠHuss ain +ĠChen nai +Ġ9 20 +Ġappoint ing +ĠCull en +,,,, ,,,, +Ġp ores +ver ified +Ġbi ochemical +em ate +Ġcoward ly +ĠHels inki +ĠEthiop ian +S OURCE +ER C +est ro +Ġbi otech +ĠS our +Ġbrew er +Bloom berg +Ġintens ify +Gl ass +an co +ĠF DR +gre SQL +ĠF ires +©¶æ ¥µ +ec o +100 1 +ĠHom eless +Ġinstant aneous +ĠH aste +ig el +D iamond +Ġp aving +Ġland fill +Ġd ads +h oun +: ] +Ġinc endiary +ĠLiving ston +ĠHil bert +ĠChe cks +st yles +in ators +ĠCl ive +ph rine +Ġchimpan zees +Ġp all +ĠJ M +ĠAad haar +ð Ŀ +Ġachie vable +dis abled +P ET +OOOO OOOO +M ot +Ġint angible +Ġbal let +ĠWe bs +ĠEst imated +Effect s +Ġb ailed +Josh ua +Ġturb ulence +Ġoccup ant +ĠDay light +Ġ36 1 +me et +Ġstat ically +Ġon look +Ġk i +il legal +Ġvel vet +Ġdehyd ration +Ġacqu ies +ĠRe z +ak ura +ĠU pton +at ro +Ġincomp rehensible +Ġback door +ĠRh ino +7 27 +Ġmath s +) + +Ġhe resy +Ġd f +ĠRoc he +ĠL ydia +Ġpanc reat +re ply +arre ll +Ġsolicit ation +Ġcirc adian +BI P +Ġfor ay +Ġcrypt ic +iz u +ime o +ĠTom ato +ĠH oms +ex amination +Ġqu arry +ĠVal iant +ĠJer icho +ĠIN CLUD +Ġ18 40 +5 19 +Ġres ists +Ġsnap shots +ĠSp ur +ĠAnt iqu +Log in +Ġbest selling +Ġant ic +ĠS utherland +ãĤ¢ ãĥ« +Ġ~ / +ĠP arm +è ĥ +P ages +int ensity +Ġimm obil +Ġ18 65 +zz o +Ġn ifty +Ġf entanyl +ĠPres ervation +op hen +Ġd arts +ĠD inosaur +po inters +ĠR ite +s uggest +aware ness +ĠSher idan +Ġst ances +Ġsor cery +Ġper jury +ĠNik ola +ie ver +Ġf iance +ĠJordan ian +ĠBall oon +Ġn ab +Ġk b +Ġhuman ities +ĠTan aka +hill ary +Ġconsult ancy +ĠZ ub +Ġrem ission +Ġconf id +CH Q +ĠF ug +Ġimpro vis +Y ep +/ _ +Ġunwilling ness +Ġport folios +05 5 +ĠInstruct or +aim an +Ġclaim ants +M bps +ĠBy e +re ceived +T weet +Ġind emn +ri z +am ara +N at +Ġeval uates +ĠL ur +ep ad +FO X +ĠTh ro +Ġrust y +Ġbed rock +ĠOp rah +J B +Ġmanip ulative +Ġwill ful +Ġrel apse +Ġext ant +The me +S ensor +ĠSt ability +go vern +Ġpo ppy +Ġkn ack +Ġins ulated +ĠT ile +ĠExt rem +Ġunt old +Ġconver ge +Ġref uel +ig roup +Ġdistort ions +Ġrav aged +Ġmechan ically +ĠRe illy +ĠN ose +ĠIncarn ation +ĠBeck y +abb ling +Ġt aco +Ġr ake +Ġmelanch oly +Ġillust rious +ĠDart mouth +Gu ide +ĠR azer +ĠBen z +Ult imate +ĠSur prise +Ġpage ant +off er +Who ever +Ġw iser +Ġchem ist +ĠHE LL +ĠBul 
k +Ġpl utonium +ĠCO VER +Ö ¼ +f ailed +Ġtire lessly +Ġinf ertility +ĠTr ident +ĠShow time +ĠC iv +V ice +requ ires +itt ance +Ġun controlled +interest ing +56 1 +Ġinnov ate +ateg ic +L ie +ĠS elling +U l +Ġsav ior +ĠT osh +Ġsw ast +P ASS +Ġr ink +Ġcard io +ĠI ro +ud i +Ġv antage +Ġv ans +ĠNi ño ++ = +Ġpropag ate +< ? +Ġmethod ological +204 39 +Ġtrig lycer +Ġing rained +ĠAn notations +arr anted +6 17 +ĠS odium +ĠA AC +techn ical +mult ipl +Ġ3 73 +å ĭ +Ġdec isively +Ġboost ers +Ġdessert s +ĠGren ade +Ġtest ifying +ĠSc ully +ID s +Ġlock down +ĠSc her +ĠR é +ĠWhit man +ĠRams ay +rem ote +Ġh ikers +ĠHy undai +Ġcons cientious +Ġcler ics +ĠSiber ian +ut i +is bury +Ġrel ayed +Ġqu artz +ĠC BI +seek ers +ull a +Ġweld ing +ĠSh al +ble acher +T ai +ĠSam son +Ġt umble +ĠInvest or +Ġsub contract +ĠShin ra +ow icz +j andro +d ad +Ġtermin ating +ĠNe ural +ä» £ +Ġleak age +ĠMid lands +ĠCaucas us +í ķ +c it +ll an +iv ably +ĠAlb ion +Ġ4 57 +Ġregist rations +Ġcomr ade +Ġclip board +0 47 +Ġdiscour aging +ĠO ops +Ad apt +Ġem path +n v +ĠPR OT +ĠDon n +ĠP ax +ĠB ayer +t is +Squ are +Ġfoot prints +part icip +ĠChile an +B rend +ind ucing +M agn +Ġclub house +ĠMagn um +Ġenc amp +ĠEth nic +uch a +ere y +Ġw atered +ĠCal ais +Ġcomplex ion +Ġsect s +Ġren ters +Ġbr as +oÄŁ an +Time out +Man agement +Ġinf ographic +P okemon +Cl ar +Ġloc ality +Ġfl ora +as el +P ont +Ġpop ulate +ĠO ng +Ġsubs istence +Ġa uctions +ĠMcA uliffe +ĠL OOK +br inger +Ġtit an +Ġmanif old +ĠâĹ ı +Ġcalibr ated +Ġcal iphate +ĠSH E +ĠCommission ers +ce ivable +j c +W inner +5 24 +Ġcond one +Other wise +Ġp iling +Ġem body +ĠCrime an +ut ics +ĠEx hibition +Ġ4 26 +e ering +Ġv ying +ĠH UGE +* =- +Ġprin cipled +à ¦ +Ġquir ks +ĠEdit ors +put ing +G ES +ĠF TA +ठ¾ +add on +ĠH AM +ĠFrie za +W oman +. $ +Ġc rib +ĠHer od +Ġtim ers +ĠSp aces +ĠMac intosh +at aka +Ġgl ide +Ġsmell ing +ĠB AL +Ġun su +Ġcond os +Ġbicy cl +ĠRev ival +55 3 +Ġjugg ling +H ug +ĠKardash ian +ĠBalk ans +mult iple +Ġnutrit ious +oc ry +19 00 +Ġinteg rates +Ġad joining +ĠF older +roll ment +ven ient +Ġu ber +y i +Ġwh iff +ĠJu ven +ĠB orough +net te +Ġb ilingual +ĠSp arks +ph thal +man ufact +Ġt outing +ĠPH I +Ke efe +Rew ard +Ġinf all +ĠTem per +typ ically +ĠNik ol +Ġregular s +Ġpseud onym +Ġexhib itions +Ġbl aster +Ġ40 9 +w arming +Ġrever ber +Ġrecip rocal +Ġ6 70 +ip ient +b ett +ĠBe gins +Ġit ching +ĠPh ar +Ass uming +Ġem itting +ĠML G +Ġbirth place +Ġt aunt +ĠL uffy +ĠAm it +Ġcir cled +ĠN ost +enn ett +Ġde forestation +ĠHist orically +ĠEvery day +Ġovert ake +79 2 +Ġn un +ĠLuc ia +Ġaccompan ies +ĠSe eking +ĠTr ash +an ism +R ogue +Ġnorth western +ĠSupplement al +ĠNY U +ĠF RI +ĠSat isf +x es +5 17 +Ġreass ured +Ġspor adic +Ġ7 01 +Ġmed ial +Ġcannabin oid +Ġbarbar ic +Ġep is +ĠExplos ive +ĠD ough +Ġuns olved +Support ed +Ġacknowled gment +sp awn +Ġkit chens +Ġ- = +talk ing +ic ist +ĠPeg asus +ĠPS U +Ġphot on +ĠAuthent ication +R G +@# & +76 2 +ĠCl air +Ġdi aper +Ġbr ist +ĠProsecut ors +ĠJ em +6 28 +ĠEvery where +ĠJean ne +equ ality +ãĥ© ãĥ³ +object s +ĠPel icans +Ġ39 2 +Ġbl u +b ys +ĠA go +Ġinstruction al +Ġdiscrim inating +ĠTR AN +ĠCorn el +ag os +Ġty re +Ġas piration +ĠBrid gewater +": - +! ". 
+ĠEn s +ĠCoc o +P ie +Ġdet ach +ĠC ouch +Ġphys ique +ĠOccup ations +osc opic +en ough +B uzz +App earance +Y P +Ġrac er +Ġcompl icity +r pm +T oy +Ġinterrupt s +ĠCat alyst +Ġut ilitarian +imp act +Ġsp aghetti +Ġp orous +Ġeste emed +Ġinc iner +ĠI OC +7 48 +Ġesp resso +ĠSm ile +abil ia +6 35 +Ġmathematic ian +Ġ4 24 +ĠK L +ĠH IP +Ġover heard +ĠT ud +ĠT ec +Ġqu izz +Ġfl attering +Ġcon n +âĢ İ +Ġatt aches +ĠR OS +ĠAC S +Ġt cp +ĠSh ame +sk ip +res pected +ĠTrin idad +gr ain +Ġfooth old +ĠUnch arted +ĠJul io +z l +av ored +ĠAn xiety +er rors +ĠCent auri +its ch +D addy +Ġclutch ing +ĠIm plement +ĠGut ierrez +Ġ7 60 +Ġtele portation +end ra +Ġrevers ible +st ros +Ad venture +08 3 +Ġliber ating +Ġas phalt +ĠSp end +AR DS +im sy +PR ES +ĠEmer ging +Ġwild fires +Ġtechn ologically +Ġem its +ĠART ICLE +Ġirregular ities +Ġcher ish +çī Ī +Ġst ink +ĠR ost +Econom ic +Ġcough ing +ĠMcC ann +pro perties +ilant ro +Ġreneg oti +Trans lation +Ġin quest +ĠGra pe +oot ers +gu i +ĠSwords man +ace ae +h itting +Ġr c +Ġexert ed +ĠS AP +it ent +Ġperil ous +Ġobsc urity +Ġassass inate +Ġab original +Ġresc uing +ĠSh attered +lock ing +all ion +Ch anging +ĠHar rington +ĠB ord +ĠAfgh ans +Jam ie +aret z +ĠAugust us +Ġ38 6 +8 30 +Ġj og +ok ingly +Tr igger +ĠH OR +Stat istics +Ġviewers hip +Ġadd itives +h ur +Ġmaxim izing +ĠR ove +ĠLou ie +ĠBuck et +ĠCHR IST +ou sel +Ġstre aks +ir ted +Ġt ert +Ġcolonial ism +Ġbur ying +y k +Cond ition +ĠDPR K +By Id +75 1 +âĹ ¼ +Ġwor risome +Ġvoc ational +sl ice +Ġsa ils +ĠCorrection al +95 4 +Ġt ul +K id +l uster +Ġfam ilial +ĠSp it +ĠEp iscopal +Specific ally +ĠVol cano +run s +q s +Ġve tted +Ġcram med +t rop +here r +Thank fully +Ġper cussion +Ġor anges +Ġround up +Ġ4 99 +x ious +Char acters +ĠZion ism +ĠR ao +ÃĽ ÃĽ +W F +Ġunintention al +ONE Y +Gr ab +Com mercial +Ġglut amate +ĠMcK enna +ru ciating +ning ton +ih u +Ch an +ĠSw ap +Ġleaf lets +Ġfunction ally +er ous +F arm +Ġcal oric +ĠLiter ally +con cert +Ġshe nan +Ġrep aid +ey es +Ġbas hing +ĠG orge +Ġcollabor ations +Ġun account +itch ie +Ġteam work +pp elin +Ġpip ing +Ġmin ced +Ġd iam +ri eg +Ġmasc ara +Ġsuck er +ĠMo ons +App s +ĠPe ck +Ġper v +ĠFl oat +o ley +ĠN ish +im ize +Ġarom atic +u in +end ish +! 
/ +ĠB icycle +ĠAS IC +ile ged +ĠQuad ro +ios yn +Ġlock out +ĠW ink +SP EC +Attempt s +Ġseed ed +red o +ias is +Ġsn ag +ãĥķ ãĤ© +ãĤ ¶ +Ġground ing +Ġrelie ver +Ġfrivol ous +ĠG ifts +ĠF aces +Es pecially +Ġmicrobi ome +im ag +ĠSch l +ĠP les +ĠBle ach +ĠIr win +ĠE aton +ĠDisc iple +Ġmultipl ication +Ġcoer ced +Ġ4 19 +st h +E vil +B omb +Ġex orc +Ġstag gered +L ESS +Ġinert ia +ĠED IT +Ġgo b +Tr aditional +Ġclass y +Lear y +ĠP AGE +yr s +Ġtrans porter +Ġmat ured +Ġhij ab +Ġbi ome +Where as +Ġex termination +ĠT ues +ĠT akeru +ĠAud rey +er ial +ĠAd en +aff les +Ġnarciss istic +ĠB aird +UT F +I re +ĠCon nie +Ch amp +Ġwhis pering +ĠH att +D K +Ġdis infect +Ġdeduct ed +Ġpart ake +Ġdown grade +ĠEs ports +ĠContin uing +Ġdemocr atically +icro bial +itt a +Ġlim estone +Ġexempt ed +ĠFren zy +H erm +7 28 +Ġfled gling +Met a +765 61 +69 3 +% : +w ake +5 26 +ĠDis cipline +Ġvirgin ity +ĠLeg ions +ĠFrank ie +int ent +Ġrest rooms +ĠRou ter +da q +Ġobjection able +âĨ ij +w ark +ĠRah ul +g ain +activ ation +abs olute +ĠAccess ed +Ġ24 00 +ogg les +Ġsecond ly +ĠDEF ENSE +Ġpost age +wra pper +sh arp +7 29 +Ġcommun icates +Ġadd on +ĠMil itia +H ong +Ġsl umped +ĠJP EG +ĠI car +ad ish +68 1 +Ġmaj esty +ĠWolf gang +ĠEl astic +u per +Ġv iz +Ġunconscious ly +ĠST D +ĠS ass +Ġflower ing +ĠHel ic +ĠDra per +ĠAm ateur +Ġman ure +Ġdis ingen +ĠLe i +br ing +9 49 +Ġinhib ited +Ġhead quartered +Ġen igmatic +�� � +Ġred ress +R H +Ġratt led +Ġd iction +l io +ĠT BA +ĠSN AP +C alling +Ġfasc ists +ĠD ove +iew icz +0 36 +Ġco asts +ĠR ect +Ġ) ] +L ot +6 29 +ĠS EM +ĠPeters en +ĠExpl ain +ĠBo ards +ĠBe zos +ĠJ ournals +Ġ20 24 +p arser +Ġmist rust +Ġgr ate +ĠL ocked +bo a +S aint +g aming +Ġvow el +in ately +bl ow +All ah +Ġun matched +Ġb ordering +ĠExp end +n r +Or acle +rou ch +Ġcont iguous +ac us +Ġdist raught +58 1 +Ġanat omical +O X +ap ixel +8 33 +ĠPL US +Ġres usc +Ġab iding +57 3 +Ġvac ancies +Em ily +Ġhyp othal +ĠWer ner +ĠWe e +ĠDJ s +5 13 +Ġwitch craft +Ġac upuncture +ent ary +benef it +Product s +ĠP SP +ĠMP G +ĠJ inn +ĠJ arrett +Ġ4 45 +ĠIm aging +ĠP yth +Fin ish +Ġte x +Ġjuven iles +Ġhero ism +Ġdoubt less +ĠA ki +ĠT end +ĠPatri arch +Ġbit ters +ĠTele communications +it atively +ag na +Ġr g +ĠS OLD +Ġcomp ulsion +ĠN asa +ĠKath ryn +Ġmillion aires +Ġintrins ically +Ġbolst ered +time out +fl o +Ġtut or +p our +Stat ement +Ġ{ * +ĠRud olph +ĠKimber ly +rog ens +adi q +] + +Ġindign ation +Ġfract uring +ĠRe leases +ĠGr ain +pro tein +L ago +Ġvac ations +Ġboot ed +ĠTH REE +ĠH G +oresc ence +Ġt f +Ġso ar +iosyn cr +Ġgl ances +ĠSp oon +ĠJ ury +ĠCow boy +Ġcreat ively +Hig her +Ġsolic itor +Ġhaw k +ac io +89 6 +Ġsuperf lu +Ġbombs hell +ct ure +Ġbroker age +Ġraid ing +Ġf rench +Ġang led +Trans action +ĠGen ocide +u pe +ĠHait ian +57 2 +! 
: +Ġunwitting ly +iter ator +sc roll +Ġtall ied +Ġbi omedical +ĠC ARD +Ġe uphem +Ġbrain storm +a quin +K o +Mic helle +ĠR unes +ĠBall istic +ud ers +Ġmod esty +ĠiP ads +ĠEzek iel +Y E +Ġstars hip +Ġpower fully +Ġper l +ĠSh ade +ĠQu art +ĠE EG +Ġfisher man +OS ED +ĠTyp ical +df x +Ġmes hes +Ġet ched +worth iness +Ġtopp led +Ġ3 96 +or ius +We iss +Ġmy sql +ĠVal halla +Ù Ĵ +le asing +Ġrec omp +rap nel +S el +04 3 +Ġder ailed +ĠGu ides +IR T +Ġde human +ĠBritt any +" )) +Ġex claim +Ġb alk +Ġ8 40 +CLA IM +int el +L AB +Ġpe gged +Ġast roph +sm oking +Ġrig ging +Ġfix ation +Ġcat apult +ins ide +ĠC ascade +ĠBolshe vik +G aza +Dep th +Ġloud spe +Ġalmond s +me yer +l eness +j en +f resh +Ġunbeat en +ĠSqu id +ĠPres umably +Tim er +B W +Ġro sters +Ġell ipt +ĠHar riet +dat abase +ĠMut ual +ĠComm odore +uk ed +kn ife +ĠCOMM UN +h ya +Ġmel ts +arch ives +Ġrat ification +Ġmultip lying +Ġinter oper +Ġasc ert +w ings +ver ting +ĠScorp ion +ay e +ĠPorts mouth +ĠM TA +n it +iaz ep +Ġqu arantine +Ġslides how +Ġcent imeters +Ġsyn opsis +Ġsp ate +th irst +Ġnom inating +ĠMel vin +Pre view +Ġthro b +Ġgener ational +ĠRad ius +rest ling +put able +aw ar +N ECT +Ġunlaw fully +ĠRevel ations +Wik ipedia +sur v +Ġeye ing +ij n +ĠF W +Ġbr unt +Ġinter stellar +Ġcl itor +ĠCroat ian +ĠCh ic +ev a +ĠDis app +ĠA kin +iner ies +d ust +Interest ed +Ġgen esis +ĠE ucl +ö n +p icking +Ġmut ated +Ġdisappro ve +ĠHD L +Ġ6 25 +Ì ¶ +c ancer +Ġsqu ats +Ġle vers +Disc uss += ] +D ex +ĠVIDE OS +A UD +Ġtrans act +ĠKin ect +ĠK uala +ĠC yp +7 47 +Ġsh attering +Ġarsen ic +ĠInt ake +ĠAngel o +ĠQu it +ĠK he +Ġ18 93 +M aker +0 29 +ĠPain ting +Dis able +9 16 +Ġanal ges +Ġtact ile +Ġprop hes +Ġd iced +ĠTravel s +ĠHe ader +ĠClub s +Ass istant +Ġinc rim +Ġd ips +Ġcruc ifix +ĠShan ahan +ĠInter pret +Ġ40 90 +al ogy +abb a +Ġsimul ac +hus band +S IM +Ġrecy cle +uc er +ed ged +Ġre naissance +ĠBomb ay +Cath olic +ĠL INE +ĠCl othing +re ports +Ġpl aus +Ġd ag +ĠM ace +Z I +Ġintr uder +ĠVeter inary +g ru +Ġsne aky +ĠS ie +ĠC innamon +P OSE +Ġcou rier +ĠC NS +Ġemanc ipation +s it +Ġplay through +ĠFac ilities +v irt +ĠG auntlet +Thom pson +Ġunbeliev ably +Param eters +Ġst itching +ign e +ĠTH ESE +Priv acy +Ġshenan igans +Ġvit ri +ĠVal id +59 1 +Ń · +ĠProt otype +ink a +SC P +ĠT id +è Ī +old ed +Ġindividual ity +Ġbark ing +Ġm ars +ĠW D +Ġ8 20 +Ġt ir +Ġsl apping +Ġdisgr untled +ĠAng ola +ri us +ĠTorn ado +ĠTh urs +Ġcapt cha +Ġang st +ĠP og +ĠAssass ins +ĠAd idas +Ġjoy ful +Ġwh ining +Emer gency +Ġphosph orus +Ġatt rition +oph on +ĠTimber wolves +ĠJ ah +ĠBr inging +ĠW ad +ĠEn sure +oh l +ĠX ie +omm el +c mp +Ġz ipper +Ġrel at +ĠCor ridor +m ilo +T ING +Av g +Ġcro pped +] } +Ġr aged +ĠLump ur +ĠGuer rero +our ke +N ut +Ġoff sets +og lu +dr m +Ġmort als +lat able +Ġdismiss ive +ä¸ ī +Ġthro ats +Ġchips et +ĠSpot light +Catal og +art ist +G b +Ġch illy +Ġst oked +Ġ3 74 +W ard +L atin +Ġf iasco +Ġble ach +Ġb rav +Enh anced +Ġin oc +ĠFior ina +_ > +Ġle ukemia +Ġel uc +Ġannoun cer +ĠLith uan +ĠArm ageddon +å ĩ +Len in +ĠR uk +Ġpe pp +ĠRom antic +ĠP IT +ĠInter stellar +ĠAt kinson +R aid +J s +Go al +C ourse +Ġvan ishing +es ley +ĠR ounds +Els a +59 3 +Ġredund ancy +ĠST AND +Ġprop hetic +Ġhabit able +ry u +Ġfaint ly +M ODE +Ġfl anked +IR C +Aw esome +Ġsp urious +ĠZ ah +ĠMS G +Ġsh ading +Ġmotiv ational +ĠSant ana +ĠS PR +Ġexc ruciating +om ial +ĠM iko +ĠLe opard +A byss +Ġ[ | +d irty +Ġbath s +Ġdem oral +and re +P B +Ġun ification +Ġsac rament +Ġ[ & +Ġpric eless +Ġgel atin +Ġeman ating +ĠAll aah +98 6 +Ġout burst +Ġer as +ĠX VI +ĠSP I +O tt +ĠLaz arus +PL IED +F lying 
+blog s +W isconsin +R aven +Ġreb ate +Ġcreep s +ĠSp an +ĠPain ter +ĠKir a +ĠAm os +ĠCor vette +Cons umer +ĠRec over +ck i +Ġpes ky +ĠIn vention +Compan ies +Ġchalleng ers +ad emic +ĠUkrain ians +ĠNeuro log +ĠFors aken +Ġent rants +Ġemb attled +Ġdef unct +ĠGlac ier +Ġpo isons +ĠH orses +m akes +ĠD irt +Ġ4 23 +hh h +ĠTrans formation +QUI RE +................ .. +Ġtrave ller +ĠSe xy +ĠK ern +ip olar +Ġransom ware +oooooooo oooooooo +E c +rub y +Prof essional +ĠOut break +arg ument +G rey +ĠFif a +ĠCH O +ĠFOR M +ĠAm trak +- [ +Ġcr adle +Ġantioxid ants +ãģ®å ® +7 36 +ĠNAS L +ĠContribut ions +Ind iana +ĠST EP +C SS +Ġsal ient +Ġall ocations +yr ights +Ġm ashed +ĠCut ter +Sex ual +Ġp ounded +Ġfan base +Ġc asc +ĠTrans parency +Ġanaly tic +ĠSummon er +× ŀ +ĠAD C +det ail +Ġvan quished +Ġcr abs +ar ie +Dest roy +ĠS ack +Ġtrans istor +Al abama +ĠK oen +ĠFisher ies +c one +Ġannex ed +ĠM GM +es a +Ġf aked +ĠCong ratulations +Ġhind ered +Ġcorrection al +ĠI TV +lee ve +Ġin appropriately +lic ks +Ġtresp ass +Ġp aws +Ġnegoti ator +ĠChrist ensen +lim its +ĠDian ne +Ġeleg ance +ĠContract s +an ke +Ob j +Ġvigil ance +Ġcast les +ĠN AD +ĠHol o +Ġemph atically +ĠTit us +ĠServ ing +ĠRich ie +ĠP igs +5 68 +Ġanim osity +ĠAtt ributes +ĠU riel +M Q +my ra +ĠApplic ant +Ġpsychiat rists +ĠV ij +ĠAb by +ag ree +P ush +Ġk Wh +hib a +Ġinc ite +ĠWe asley +ĠTax i +minist ic +hy per +ĠF arn +Ġ6 01 +ĠNation wide +F ake +95 2 +Ġma ize +Ġinteract ed +Ġtransition ed +Ġparas itic +Ġharm onic +Ġdec aying +Ġbas eless +ns ics +Ġtrans pired +Ġabund antly +ĠFore nsic +Ġtread mill +ĠJ av +ab and +Ġssh d +Ġfront man +ĠJak arta +oll er +dro ps +ĠSERV ICES +rompt u +oph ical +h ospital +bled on +6 45 +Ġmid range +ĠEV ENT +cul ated +raw led +Ġper ched +Ġover board +ĠPe el +ĠP wr +ĠCar th +ĠCOM PLE +co e +sh all +Ġdeter rence +M ETHOD +ĠAbs ent +M EN +Ġs ill +ĠLE VEL +Y ork +Ġsin ners +ĠOP EC +ĠN ur +ĠDesign s +se lection +Ġunw orthy +CH A +Ġstreng thens +88 3 +ed ly +Ġslic ing +Ġmal nutrition +Ġfilm making +ĠPol k +ur ated +Ġ4 21 +bre akers +!' 
" +Ġwet lands +ĠDisc rimination +Ġallow able +Ġste ered +ĠSic ily +S AM +Ġmust ache +Ġm ids +Ġcl ipped +Ġcirc ulate +Ġbr ittle +ĠBuild ings +ra ised +ĠRound up +Ġwealth ier +Ġoverw rite +Ġover powered +ĠGerr ard +s ites +PD ATED +Ġacute ly +ĠGam ble +Ġp im +ĠK us +Typ ically +De ploy +ĠMoroc can +p otion +com be +Ġvigil ante +Ġ36 3 +St ew +ĠB agg +Ġres ided +ĠSp o +Ġrem nant +Ġempt iness +br ainer +Ġout patient +pri ority +Ġle ptin +ĠPay ton +ĠGle aming +ĠS hed +ĠPol o +ĠMormon ism +rest ricted +arl ane +w x +Ġcreat ine +ĠAn on +ĠST UD +ĠJ UL +ĠT ee +5 28 +08 9 +Ġhat ched +Dis patch +ĠCompos ite +Ġ45 1 +p uff +ĠX COM +ĠOr n +ĠTH ANK +END ED +ĠAshe ville +Ġà ľ +Ġman go +ĠS lightly +world ly +ĠW ander +ĠExp and +ĠCh r +M ist +Ġorthodox y +ĠUN ESCO +reg ate +Else where +k ie +ir led +Ġtopp le +Ġadopt ive +ĠLeg s +d ress +ĠS agan +b are +ĠGl ou +Cr unch +Ġhelp ers +Ġchron ically +ĠH uma +1 0000 +Ġaccommod ating +äº Ķ +Ġwrink les +Ġdod ged +four th +Ġpre con +Ġcompress or +ĠK are +Ġev ict +ĠWar wick +im ar +Ġmodern ization +Ġband wagon +Ġref uted +Ġnet ted +ĠNa ples +ĠGen ie +per ors +Ġfield ed +Ġde re +ĠPar ables +le es +Ġtr out +asp ers +Ġn ihil +Ġhapp iest +Ġflo ppy +ĠLo ft +ĠHe ard +Ġun ison +Ġl ug +ĠRed mond +class ic +Supp orters +SH IP +G MT +Ġfue lled +ç IJ +Ġd d +ĠEmin em +Ġ18 97 +NY SE +Ġsecret aries +ĠF IA +ĠCanaver al +F avorite +Ġp omp +Ġdetain ee +ers hip +aim on +i our +ĠA pex +Ġplant ations +am ia +ac ion +R ust +Ġtow ed +ĠTru ly +5 77 +Ġshel tered +r ider +W o +Ġl air +ĠInt elligent +impro ve +m atically +Ġet iquette +ad ra +all o +ĠJun o +any thing +ĠStru ggle +ĠPred ict +ĠGr imes +ĠAMER ICA +ct x +ĠSit uation +W OOD +Ġsol uble +me ier +Ġintoler able +ang ering +Ġun interrupted +Ġtool tip +Ġinterrog ated +Ġgun ned +ĠSne ak +æŃ ¦ +Ġt ether +Ġcr umble +L ens +Ġclust ered +ĠSy l +ĠHas an +Ġdystop ian +w ana +Ġjoy stick +ĠTh ib +amm u +Tom orrow +5 46 +Ġoverc ame +Ġminim ized +cept or +Run ner +ENG TH +ĠBrend a +ĠAchieve ments +Ġtor ches +Ġrapp ort +ĠInvestig ator +ĠHand ling +rel ation +g rey +8 15 +Ġk cal +ĠComm ands +d q +Ġcur ls +Ġbe arer +Ġcyn icism +it ri +ĠUse ful +B ee +D CS +Ġab ras +P ract +BIL ITIES +7 12 +Ġdebug ger +Ġdebt or +ĠL ia +ĠK ers +Ġexacerb ate +ĠSt acy +ĠB land +ĠSc enes +Ġbranch ing +âĸĪâĸĪâĸĪâĸĪ âĸĪâĸĪâĸĪâĸĪ +ape ake +Ġs alsa +Ġmish and +ĠKon ami +ĠN ib +Ġanecd ote +Ġagree able +Ï ī +ĠNath aniel +ĠHe isman +ĠB eware +Ġ18 86 +spect ive +69 1 +5 22 +Ġinhib its +Ġhas hing +Ġ18 89 +å° Ĩ +v ich +P ure +Ġsolid ly +Ġaspir in +im aru +Ġstreet car +ĠU CS +ĠJ udd +Ġflash backs +p ins +Ġ14 40 +ĠUN HCR +ĠSym ptoms +T IT +5 38 +F ra +% ); +Ġo oz +Ġcur few +Ġcal med +Ġparticip ates +Te X +Ġnons ensical +Ġfull back +ĠDe L +mon key +h ari +Ġmetabol ites +Ġloot ed +ĠAL WAYS +ĠB CC +L t +oc het +B one +Ġveto ed +Ġg cc +ĠCL ICK +Ġ18 88 +s af +Ġstiff ness +Ġlow ly +ĠGe h +vers on +ors et +Ġun foreseen +Ġan esthesia +ĠOpt ical +Ġrecon structed +ĠT up +sh ows +NEW S +ĠNewsp aper +ĠA SA +ter a +N umbers +Ġinexpl icable +× ij +Ġhard ness +unt arily +ĠA cer +grad ient +ARD IS +Ġwood land +Ġmetaph ors +ĠWem bley +ĠPa vel +phil is +Ġre writing +Ġpercept ual +Ġ10 70 +worm s +ĠDown s +Ġunsur prisingly +Ġtag ging +fl ame +Ġlit res +Ġboun ces +ĠB abe +sh ut +Ġoverd oses +ĠShe ila +ĠCh au +ĠBl ess +Capt ure +ĠSign ificant +ĠSc ion +Ġ38 9 +ĠMc H +ĠTitan ium +ĠMe al +amed a +ag ents +agg ressive +B illy +76 3 +ĠS aying +DER R +it one +Coll ins +B ound +Ġbol ted +ĠDM CA +95 3 +Ġun iqueness +Ġep igen +un ci +ant am +Ġreck oning +ch airs +OG R +ĠSen egal +Ġ18 62 +re levant +Ġ ¯ +Ġpharm acies 
+ĠG eral +v ier +Y an +OR PG +Ġrab id +b ending +ĠUN ITED +Ġ4 65 +As sembly +Ġwe ep +Ġbe hest +ĠMother s +ĠJ ace +h id +Ġwh irlwind +ĠUN IVERS +Ġut opian +Ġkidn ap +Ph ilipp +K in +89 3 +Ġlivest ream +ĠM ISS +Ġsub versive +ĠTechn iques +ĠJUST ICE +ĠB ASE +Ġ38 7 +Ġassail ants +ĠHard core +Ġsprink led +ĠP se +é ļ +print ed +ĠH au +OR GE +ĠT OUR +Ġl aced +Ġit ch +G iving +Ġport ed +78 1 +//////////////// //////////////// +bre eding +Ġlog ger +ĠH OL +inn ie +First ly +Ġembry onic +Ġdeleg ated +p ai +O IL +Ġcentr ally +ĠR x +ĠSc outing +D utch +Ġhe reditary +ĠCru iser +s at +5 29 +ĠMar riott +other mal +Ġprohib itions +E arn +ĠSt ab +ĠColleg es +ĠBel ief +st retched +ĠL H +ĠEntity Item +C IA +Ġun rem +Ġlaure ate +Ġdenomin ations +sum mary +h ler +S pect +ĠK laus +ĠBe ans +Ġins ur +ĠPA X +Ġfield er +ĠV et +ĠSp arrow +z ie +ĠS Q +ĠMond ays +ĠOff line +ĠLer ner +ĠExt ensions +Ire land +Ġpatron age +Ġcontrast ed +ĠMan ia +h irt +Mos cow +Ġcondem ns +ĠAn ge +Ġcomp osing +ĠPe pe +ĠP addock +Ġheter ogeneity +Ġide ologically +Ġf ishes +Ġcur sing +ĠR utherford +ĠFlo ating +ĠAm elia +Te a +Syn opsis +Ġstun ts +Ġbe ad +Ġstock ing +ĠM ILL +ob ook +mass ive +\ < +Ġh ump +ĠPref erences +Engine Debug +ge ist +ĠNiet o +ome ver +ish y +eval uate +col onial +Altern ative +ĠGo Pro +ĠV ortex +ĠNET WORK +ans ky +Sec ure +ĠTh rust +Sn ake +Ġparcel s +Ġsam urai +Ġactress es +N ap +M F +ifer ation +Be er +5 23 +ĠI ly +oint ment +P ing +Ġstri ped +ĠMell on +oss ession +Ġneut ron +end ium +Ġa ph +ĠFlav oring +Ġ38 3 +Ġrespons iveness +ĠJ indal +ĠHitch cock +Den ver +ĠDRAG ON +sm anship +ĠDu pl +Ġs ly +Ġweb cam +ĠTw ain +ĠDar ling +ili ate +cons umer +D IT +Ġnames ake +Ġun orthodox +Ġfun er +ĠPL oS +ĠCONTR OL +ozy g +ogl obin +F ACE +ER G +ĠD ia +ĠF iesta +ce le +0 34 +Ġencl ave +âĸ¬ âĸ¬ +on ement +al ist +M and +Ġhome grown +ĠF ancy +Ġconcept ions +ĠCont ains +ure en +Ġreiter ate +Ġme ager +Ġinstall ments +Sp awn +6 27 +Ġphot oc +ĠCab rera +ĠRos enthal +ĠLans ing +is ner +Ġinvest s +ĠUFO s +EX P +Hard ware +Ġtr agically +Ġconced es +ie ft +ch am +bor gh +ĠSch r +ĠMel anie +ĠH oy +Ġvisit ation +Ġid iosyncr +Ġfract ions +Ġfore skin +ob os +Ġpo aching +ĠVI EW +Ġstimul ates +ĠG ork +can on +M IC +ĠNem esis +ĠInd ra +ĠDM V +Ġ5 29 +Ġinspect ing +Ġgrand ma +ĠW hedon +ĠSh ant +ĠP urg +ik an +ĠT eg +ĠCL R +z ac +Vict oria +ĠVer ify +ion ics +Ġpart ying +ĠM ou +col our +Ġtestim onies +l ations +Ġpress uring +hi ro +ac ers +Ġf id +ang ler +ĠCS I +Ġhere after +Ġdiss idents +report ing +iph any +che v +Ġsol itude +Ġl obe +Ġind is +Ġcred ential +re cent +ad ult +ĠNir vana +ĠFranch ise +L ayer +H yp +ĠBerks hire +Ġwill s +t if +Ġtot em +ĠJud ah +rep air +Inst ant +5 48 +Ġemb assies +Ġbott leneck +Ġb ount +Ġtyp ew +ĠAl vin +j ing +im ilar +R ush +Ġbr im +ĠHEL P +A im +] ' +Ġpass ively +Ġbound ed +ĠR ated +Ġcriminal ity +Ġbiom ark +Ġdisp atcher +ĠTow ards +Ġ+ ++ +right eous +f rog +ĠP anc +C arter +0 32 +æ© Ł +Ġult raviolet +ĠLic ensed +ĠT ata +ĠBl essing +ĠG AM +Ġchem ically +ĠSe af +ĠRE LE +ĠMerc enary +capital ist +Ġform ulations +Ġann ihilation +ĠVer b +ĠAr gon +Ġun loaded +Ġmorp hed +Ġconqu ering +back er +I ELD +Ġtheft s +Ġfront runner +ĠRoy ale +ĠFund amental +el ight +C hip +necess ary +ay n +ĠSl ip +Ġ4 48 +cern ed +P ause +Ġshock ingly +ĠAB V +Ġcomp osure +7 33 +ĠMotors port +ah ime +Mur ray +M ach +Ġgr ids +Ġdeb ian +Ġfurther more +Ġdexter ity +ĠCollect ions +os lov +il age +b j +ĠMont eneg +Ġstrut Connector +Ġmassac res +Ġbrief s +fet ched +uv ian +ol ition +Fail ure +emon ic +Ġfl ared +Ġclaim ant +Ġc ures +Ġgive aways +ĠSubst 
ance +al ions +Ġcr inge +ĠK ul +Ġarist ocracy +ĠUl ster +ol ated +h ousing +ĠM IS +Ġgl ared +ĠWil helm +ne eds +lam bda +build ers +ĠV IS +Ġradi ator +ĠGhost busters +Ġ4 36 +act ual +Ġher ds +ç a +watch ing +Ġcounter ing +Ch arge +Ġchar red +Ġwar heads +Ġiod ine +ĠM acy +04 1 +Ġdepart ures +ĠS ins +Ġdy ed +ĠConcept s +g ado +7 13 +Ġquot ations +Ġg ist +ĠChrist y +Ġant igen +ĠHem p +ĠD rawn +ĠB arg +ez vous +Ġp aternity +Ġar du +ĠAnch orage +ĠR ik +Ġover loaded +ĠUs ername +ĠTam my +ĠN au +ĠCell ular +Ġw aning +Ġrod ent +ĠWor cester +il ts +ĠT ad +Ġdwell ings +Ġbull ish +4 31 +Ġretali ate +Ġmig raine +ĠChev ron +CH ECK +Ġdon key +c rim +SP A +ĠAn alog +Ġmarqu ee +ĠHa as +B ir +ĠGD DR +ĠDownload s +Ġwill power +ĠFor th +ĠRecord ed +Ġimp ossibility +ĠLog ged +ĠFr anks +ĠR att +in itions +Ġclean ers +Ġsore ly +Ġflick ering +ĠEx amination +c atching +allow een +Ms g +Ġdun no +F a +Ġdys ph +c razy +.' '. +Ġmain line +Ġc s +Ġp tr +ĠW ally +ig un +95 1 +ĠBig foot +f ights +Ġretrie ving +J r +Ġdupl ication +ĠExpl an +Ġrel ational +Ġqu aint +Ġbisc uits +Ġad o +Ġsh udder +Ġantid ote +blood ed +ks h +Ġsa uces +Ġrein vest +Ġdispens ary +ĠD iver +Ġ9 000 +stud ent +Ġin separ +esc ap +Ġtodd lers +ĠGP IO +ĠAss ignment +head ers +Ġlack luster +Ġab ack +95 6 +Ġtool bar +7 45 +Ġo ust +Ġcontempl ation +ĠPRES IDENT +Ġ4 58 +==== == +Ġguarantee ing +ĠHe ist +ĠCann es +Ļ ½ +Ġcollabor ator +ĠAm p +Ġg ou +ĠSH ALL +st ories +78 3 +Ġmobil ized +Ġbro od +ĠL U +ĠðŁ ij +Ġref in +ĠAnthrop ology +v ind +ill i +Ġwarrant ies +ĠB abel +Ġsw ath +Ġc aches +Ġantagon ists +art ifacts +Ġhot ly +ĠSt arts +ĠG ö +z ag +!! !!! +Ġsc ourge +Ġcons piring +ru its +re verse +ĠShe en +ĠJes uit +ĠGiov anni +ad ies +Ġbutt ocks +ear cher +ac an +Ġvolley ball +Ġshroud ed +Ġscore board +b ats +ĠI PM +Ġass es +Ġde regulation +ĠTe legram +ĠReb oot +Ġ7 000 +ĠCan ary +Ġk ernels +ĠFranç ois +ĠD uff +ĠP on +ĠLe ica +ĠGar min +Ġor phans +ĠClaud ia +Ġcal endars +ĠLe ilan +ent o +R ocket +Ġbr unch +ĠHaw king +ain ers +Ġsens ibilities +Ġk W +ĠK and +Ġre claimed +Ġinteresting ly +× © +rom y +J M +ĠEnhance ment +b ush +Sk ip +Ġrapp ers +Ġg azing +p edia +ath lon +Rev olution +Ġsn ipers +Ġre verted +Ġconglomer ate +T erry +79 4 +Ġhars her +Ġdes olate +ĠHit man +Comm ission +Ġ( / +âĢ¦ ." 
+Com par +Ġampl ification +om inated +Ġreg ress +ĠColl ider +Ġinform ants +Ġg azed diff --git a/phi-2/special_tokens_map.json b/phi-2/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..0204ed10c186a4c7c68f55dff8f26087a45898d6 --- /dev/null +++ b/phi-2/special_tokens_map.json @@ -0,0 +1,5 @@ +{ + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "unk_token": "<|endoftext|>" +} diff --git a/phi-2/tokenizer.json b/phi-2/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..c1148447551675ea739c440ee3e247df9f354d8f --- /dev/null +++ b/phi-2/tokenizer.json @@ -0,0 +1,100647 @@ +{ + "version": "1.0", + "truncation": null, + "padding": null, + "added_tokens": [ + { + "id": 50256, + "content": "<|endoftext|>", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 50257, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50258, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50259, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50260, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50261, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50262, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50263, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50264, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50265, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50266, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50267, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50268, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50269, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50270, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50271, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50272, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50273, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50274, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50275, + "content": " ", + 
"single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50276, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50277, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50278, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50279, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50280, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50281, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50282, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50283, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50284, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50285, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50286, + "content": " ", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50287, + "content": "\t\t\t\t\t\t\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50288, + "content": "\t\t\t\t\t\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50289, + "content": "\t\t\t\t\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50290, + "content": "\t\t\t\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50291, + "content": "\t\t\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50292, + "content": "\t\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50293, + "content": "\t\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + }, + { + "id": 50294, + "content": "\t\t", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": false + } + ], + "normalizer": null, + "pre_tokenizer": { + "type": "ByteLevel", + "add_prefix_space": false, + "trim_offsets": true, + "use_regex": true + }, + "post_processor": { + "type": "ByteLevel", + "add_prefix_space": true, + "trim_offsets": false, + "use_regex": true + }, + "decoder": { + "type": "ByteLevel", + "add_prefix_space": true, + "trim_offsets": true, + "use_regex": true + }, + "model": { + "type": "BPE", + "dropout": null, + "unk_token": null, + "continuing_subword_prefix": "", + "end_of_word_suffix": "", + "fuse_unk": false, + "byte_fallback": false, + "vocab": { + "!": 0, + "\"": 1, + "#": 2, + 
"$": 3, + "%": 4, + "&": 5, + "'": 6, + "(": 7, + ")": 8, + "*": 9, + "+": 10, + ",": 11, + "-": 12, + ".": 13, + "/": 14, + "0": 15, + "1": 16, + "2": 17, + "3": 18, + "4": 19, + "5": 20, + "6": 21, + "7": 22, + "8": 23, + "9": 24, + ":": 25, + ";": 26, + "<": 27, + "=": 28, + ">": 29, + "?": 30, + "@": 31, + "A": 32, + "B": 33, + "C": 34, + "D": 35, + "E": 36, + "F": 37, + "G": 38, + "H": 39, + "I": 40, + "J": 41, + "K": 42, + "L": 43, + "M": 44, + "N": 45, + "O": 46, + "P": 47, + "Q": 48, + "R": 49, + "S": 50, + "T": 51, + "U": 52, + "V": 53, + "W": 54, + "X": 55, + "Y": 56, + "Z": 57, + "[": 58, + "\\": 59, + "]": 60, + "^": 61, + "_": 62, + "`": 63, + "a": 64, + "b": 65, + "c": 66, + "d": 67, + "e": 68, + "f": 69, + "g": 70, + "h": 71, + "i": 72, + "j": 73, + "k": 74, + "l": 75, + "m": 76, + "n": 77, + "o": 78, + "p": 79, + "q": 80, + "r": 81, + "s": 82, + "t": 83, + "u": 84, + "v": 85, + "w": 86, + "x": 87, + "y": 88, + "z": 89, + "{": 90, + "|": 91, + "}": 92, + "~": 93, + "¡": 94, + "¢": 95, + "£": 96, + "¤": 97, + "¥": 98, + "¦": 99, + "§": 100, + "¨": 101, + "©": 102, + "ª": 103, + "«": 104, + "¬": 105, + "®": 106, + "¯": 107, + "°": 108, + "±": 109, + "²": 110, + "³": 111, + "´": 112, + "µ": 113, + "¶": 114, + "·": 115, + "¸": 116, + "¹": 117, + "º": 118, + "»": 119, + "¼": 120, + "½": 121, + "¾": 122, + "¿": 123, + "À": 124, + "Á": 125, + "Â": 126, + "Ã": 127, + "Ä": 128, + "Å": 129, + "Æ": 130, + "Ç": 131, + "È": 132, + "É": 133, + "Ê": 134, + "Ë": 135, + "Ì": 136, + "Í": 137, + "Î": 138, + "Ï": 139, + "Ð": 140, + "Ñ": 141, + "Ò": 142, + "Ó": 143, + "Ô": 144, + "Õ": 145, + "Ö": 146, + "×": 147, + "Ø": 148, + "Ù": 149, + "Ú": 150, + "Û": 151, + "Ü": 152, + "Ý": 153, + "Þ": 154, + "ß": 155, + "à": 156, + "á": 157, + "â": 158, + "ã": 159, + "ä": 160, + "å": 161, + "æ": 162, + "ç": 163, + "è": 164, + "é": 165, + "ê": 166, + "ë": 167, + "ì": 168, + "í": 169, + "î": 170, + "ï": 171, + "ð": 172, + "ñ": 173, + "ò": 174, + "ó": 175, + "ô": 176, + "õ": 177, + "ö": 178, + "÷": 179, + "ø": 180, + "ù": 181, + "ú": 182, + "û": 183, + "ü": 184, + "ý": 185, + "þ": 186, + "ÿ": 187, + "Ā": 188, + "ā": 189, + "Ă": 190, + "ă": 191, + "Ą": 192, + "ą": 193, + "Ć": 194, + "ć": 195, + "Ĉ": 196, + "ĉ": 197, + "Ċ": 198, + "ċ": 199, + "Č": 200, + "č": 201, + "Ď": 202, + "ď": 203, + "Đ": 204, + "đ": 205, + "Ē": 206, + "ē": 207, + "Ĕ": 208, + "ĕ": 209, + "Ė": 210, + "ė": 211, + "Ę": 212, + "ę": 213, + "Ě": 214, + "ě": 215, + "Ĝ": 216, + "ĝ": 217, + "Ğ": 218, + "ğ": 219, + "Ġ": 220, + "ġ": 221, + "Ģ": 222, + "ģ": 223, + "Ĥ": 224, + "ĥ": 225, + "Ħ": 226, + "ħ": 227, + "Ĩ": 228, + "ĩ": 229, + "Ī": 230, + "ī": 231, + "Ĭ": 232, + "ĭ": 233, + "Į": 234, + "į": 235, + "İ": 236, + "ı": 237, + "IJ": 238, + "ij": 239, + "Ĵ": 240, + "ĵ": 241, + "Ķ": 242, + "ķ": 243, + "ĸ": 244, + "Ĺ": 245, + "ĺ": 246, + "Ļ": 247, + "ļ": 248, + "Ľ": 249, + "ľ": 250, + "Ŀ": 251, + "ŀ": 252, + "Ł": 253, + "ł": 254, + "Ń": 255, + "Ġt": 256, + "Ġa": 257, + "he": 258, + "in": 259, + "re": 260, + "on": 261, + "Ġthe": 262, + "er": 263, + "Ġs": 264, + "at": 265, + "Ġw": 266, + "Ġo": 267, + "en": 268, + "Ġc": 269, + "it": 270, + "is": 271, + "an": 272, + "or": 273, + "es": 274, + "Ġb": 275, + "ed": 276, + "Ġf": 277, + "ing": 278, + "Ġp": 279, + "ou": 280, + "Ġan": 281, + "al": 282, + "ar": 283, + "Ġto": 284, + "Ġm": 285, + "Ġof": 286, + "Ġin": 287, + "Ġd": 288, + "Ġh": 289, + "Ġand": 290, + "ic": 291, + "as": 292, + "le": 293, + "Ġth": 294, + "ion": 295, + "om": 296, + "ll": 297, + "ent": 298, + "Ġn": 299, + "Ġl": 300, + "st": 301, + "Ġre": 
302, + "ve": 303, + "Ġe": 304, + "ro": 305, + "ly": 306, + "Ġbe": 307, + "Ġg": 308, + "ĠT": 309, + "ct": 310, + "ĠS": 311, + "id": 312, + "ot": 313, + "ĠI": 314, + "ut": 315, + "et": 316, + "ĠA": 317, + "Ġis": 318, + "Ġon": 319, + "im": 320, + "am": 321, + "ow": 322, + "ay": 323, + "ad": 324, + "se": 325, + "Ġthat": 326, + "ĠC": 327, + "ig": 328, + "Ġfor": 329, + "ac": 330, + "Ġy": 331, + "ver": 332, + "ur": 333, + "Ġu": 334, + "ld": 335, + "Ġst": 336, + "ĠM": 337, + "'s": 338, + "Ġhe": 339, + "Ġit": 340, + "ation": 341, + "ith": 342, + "ir": 343, + "ce": 344, + "Ġyou": 345, + "il": 346, + "ĠB": 347, + "Ġwh": 348, + "ol": 349, + "ĠP": 350, + "Ġwith": 351, + "Ġ1": 352, + "ter": 353, + "ch": 354, + "Ġas": 355, + "Ġwe": 356, + "Ġ(": 357, + "nd": 358, + "ill": 359, + "ĠD": 360, + "if": 361, + "Ġ2": 362, + "ag": 363, + "ers": 364, + "ke": 365, + "Ġ\"": 366, + "ĠH": 367, + "em": 368, + "Ġcon": 369, + "ĠW": 370, + "ĠR": 371, + "her": 372, + "Ġwas": 373, + "Ġr": 374, + "od": 375, + "ĠF": 376, + "ul": 377, + "ate": 378, + "Ġat": 379, + "ri": 380, + "pp": 381, + "ore": 382, + "ĠThe": 383, + "Ġse": 384, + "us": 385, + "Ġpro": 386, + "Ġha": 387, + "um": 388, + "Ġare": 389, + "Ġde": 390, + "ain": 391, + "and": 392, + "Ġor": 393, + "igh": 394, + "est": 395, + "ist": 396, + "ab": 397, + "rom": 398, + "ĠN": 399, + "th": 400, + "Ġcom": 401, + "ĠG": 402, + "un": 403, + "op": 404, + "00": 405, + "ĠL": 406, + "Ġnot": 407, + "ess": 408, + "Ġex": 409, + "Ġv": 410, + "res": 411, + "ĠE": 412, + "ew": 413, + "ity": 414, + "ant": 415, + "Ġby": 416, + "el": 417, + "os": 418, + "ort": 419, + "oc": 420, + "qu": 421, + "Ġfrom": 422, + "Ġhave": 423, + "Ġsu": 424, + "ive": 425, + "ould": 426, + "Ġsh": 427, + "Ġthis": 428, + "nt": 429, + "ra": 430, + "pe": 431, + "ight": 432, + "art": 433, + "ment": 434, + "Ġal": 435, + "ust": 436, + "end": 437, + "--": 438, + "all": 439, + "ĠO": 440, + "ack": 441, + "Ġch": 442, + "Ġle": 443, + "ies": 444, + "red": 445, + "ard": 446, + "âĢ": 447, + "out": 448, + "ĠJ": 449, + "Ġab": 450, + "ear": 451, + "iv": 452, + "ally": 453, + "our": 454, + "ost": 455, + "gh": 456, + "pt": 457, + "Ġpl": 458, + "ast": 459, + "Ġcan": 460, + "ak": 461, + "ome": 462, + "ud": 463, + "The": 464, + "Ġhis": 465, + "Ġdo": 466, + "Ġgo": 467, + "Ġhas": 468, + "ge": 469, + "'t": 470, + "ĠU": 471, + "rou": 472, + "Ġsa": 473, + "Ġj": 474, + "Ġbut": 475, + "Ġwor": 476, + "Ġall": 477, + "ect": 478, + "Ġk": 479, + "ame": 480, + "Ġwill": 481, + "ok": 482, + "Ġwhe": 483, + "Ġthey": 484, + "ide": 485, + "01": 486, + "ff": 487, + "ich": 488, + "pl": 489, + "ther": 490, + "Ġtr": 491, + "..": 492, + "Ġint": 493, + "ie": 494, + "ure": 495, + "age": 496, + "Ġne": 497, + "ial": 498, + "ap": 499, + "ine": 500, + "ice": 501, + "Ġme": 502, + "Ġout": 503, + "ans": 504, + "one": 505, + "ong": 506, + "ions": 507, + "Ġwho": 508, + "ĠK": 509, + "Ġup": 510, + "Ġtheir": 511, + "Ġad": 512, + "Ġ3": 513, + "Ġus": 514, + "ated": 515, + "ous": 516, + "Ġmore": 517, + "ue": 518, + "og": 519, + "ĠSt": 520, + "ind": 521, + "ike": 522, + "Ġso": 523, + "ime": 524, + "per": 525, + ".\"": 526, + "ber": 527, + "iz": 528, + "act": 529, + "Ġone": 530, + "Ġsaid": 531, + "Ġ-": 532, + "are": 533, + "Ġyour": 534, + "cc": 535, + "ĠTh": 536, + "Ġcl": 537, + "ep": 538, + "ake": 539, + "able": 540, + "ip": 541, + "Ġcont": 542, + "Ġwhich": 543, + "ia": 544, + "Ġim": 545, + "Ġabout": 546, + "Ġwere": 547, + "very": 548, + "ub": 549, + "Ġhad": 550, + "Ġen": 551, + "Ġcomp": 552, + ",\"": 553, + "ĠIn": 554, + "Ġun": 555, + "Ġag": 556, + "ire": 557, + "ace": 558, + 
"au": 559, + "ary": 560, + "Ġwould": 561, + "ass": 562, + "ry": 563, + "ĠâĢ": 564, + "cl": 565, + "ook": 566, + "ere": 567, + "so": 568, + "ĠV": 569, + "ign": 570, + "ib": 571, + "Ġoff": 572, + "Ġte": 573, + "ven": 574, + "ĠY": 575, + "ile": 576, + "ose": 577, + "ite": 578, + "orm": 579, + "Ġ201": 580, + "Ġres": 581, + "Ġman": 582, + "Ġper": 583, + "Ġother": 584, + "ord": 585, + "ult": 586, + "Ġbeen": 587, + "Ġlike": 588, + "ase": 589, + "ance": 590, + "ks": 591, + "ays": 592, + "own": 593, + "ence": 594, + "Ġdis": 595, + "ction": 596, + "Ġany": 597, + "Ġapp": 598, + "Ġsp": 599, + "int": 600, + "ress": 601, + "ations": 602, + "ail": 603, + "Ġ4": 604, + "ical": 605, + "Ġthem": 606, + "Ġher": 607, + "ount": 608, + "ĠCh": 609, + "Ġar": 610, + "Ġif": 611, + "Ġthere": 612, + "Ġpe": 613, + "Ġyear": 614, + "av": 615, + "Ġmy": 616, + "Ġsome": 617, + "Ġwhen": 618, + "ough": 619, + "ach": 620, + "Ġthan": 621, + "ru": 622, + "ond": 623, + "ick": 624, + "Ġover": 625, + "vel": 626, + "Ġqu": 627, + "ĊĊ": 628, + "Ġsc": 629, + "reat": 630, + "ree": 631, + "ĠIt": 632, + "ound": 633, + "port": 634, + "Ġalso": 635, + "Ġpart": 636, + "fter": 637, + "Ġkn": 638, + "Ġbec": 639, + "Ġtime": 640, + "ens": 641, + "Ġ5": 642, + "ople": 643, + "Ġwhat": 644, + "Ġno": 645, + "du": 646, + "mer": 647, + "ang": 648, + "Ġnew": 649, + "----": 650, + "Ġget": 651, + "ory": 652, + "ition": 653, + "ings": 654, + "Ġjust": 655, + "Ġinto": 656, + "Ġ0": 657, + "ents": 658, + "ove": 659, + "te": 660, + "Ġpeople": 661, + "Ġpre": 662, + "Ġits": 663, + "Ġrec": 664, + "Ġtw": 665, + "ian": 666, + "irst": 667, + "ark": 668, + "ors": 669, + "Ġwork": 670, + "ade": 671, + "ob": 672, + "Ġshe": 673, + "Ġour": 674, + "wn": 675, + "ink": 676, + "lic": 677, + "Ġ19": 678, + "ĠHe": 679, + "ish": 680, + "nder": 681, + "ause": 682, + "Ġhim": 683, + "ons": 684, + "Ġ[": 685, + "Ġro": 686, + "form": 687, + "ild": 688, + "ates": 689, + "vers": 690, + "Ġonly": 691, + "oll": 692, + "Ġspe": 693, + "ck": 694, + "ell": 695, + "amp": 696, + "Ġacc": 697, + "Ġbl": 698, + "ious": 699, + "urn": 700, + "ft": 701, + "ood": 702, + "Ġhow": 703, + "hed": 704, + "Ġ'": 705, + "Ġafter": 706, + "aw": 707, + "Ġatt": 708, + "ov": 709, + "ne": 710, + "Ġplay": 711, + "erv": 712, + "ict": 713, + "Ġcould": 714, + "itt": 715, + "Ġam": 716, + "Ġfirst": 717, + "Ġ6": 718, + "Ġact": 719, + "Ġ$": 720, + "ec": 721, + "hing": 722, + "ual": 723, + "ull": 724, + "Ġcomm": 725, + "oy": 726, + "old": 727, + "ces": 728, + "ater": 729, + "Ġfe": 730, + "Ġbet": 731, + "we": 732, + "iff": 733, + "Ġtwo": 734, + "ock": 735, + "Ġback": 736, + ").": 737, + "ident": 738, + "Ġunder": 739, + "rough": 740, + "sel": 741, + "xt": 742, + "Ġmay": 743, + "round": 744, + "Ġpo": 745, + "ph": 746, + "iss": 747, + "Ġdes": 748, + "Ġmost": 749, + "Ġdid": 750, + "Ġadd": 751, + "ject": 752, + "Ġinc": 753, + "fore": 754, + "Ġpol": 755, + "ont": 756, + "Ġagain": 757, + "clud": 758, + "tern": 759, + "Ġknow": 760, + "Ġneed": 761, + "Ġcons": 762, + "Ġco": 763, + "Ġ.": 764, + "Ġwant": 765, + "Ġsee": 766, + "Ġ7": 767, + "ning": 768, + "iew": 769, + "ĠThis": 770, + "ced": 771, + "Ġeven": 772, + "Ġind": 773, + "ty": 774, + "ĠWe": 775, + "ath": 776, + "Ġthese": 777, + "Ġpr": 778, + "Ġuse": 779, + "Ġbecause": 780, + "Ġfl": 781, + "ng": 782, + "Ġnow": 783, + "ĠâĢĵ": 784, + "com": 785, + "ise": 786, + "Ġmake": 787, + "Ġthen": 788, + "ower": 789, + "Ġevery": 790, + "ĠUn": 791, + "Ġsec": 792, + "oss": 793, + "uch": 794, + "Ġem": 795, + "Ġ=": 796, + "ĠRe": 797, + "ied": 798, + "rit": 799, + "Ġinv": 800, + "lect": 801, + "Ġsupp": 802, 
+ "ating": 803, + "Ġlook": 804, + "man": 805, + "pect": 806, + "Ġ8": 807, + "row": 808, + "Ġbu": 809, + "Ġwhere": 810, + "ific": 811, + "Ġyears": 812, + "ily": 813, + "Ġdiff": 814, + "Ġshould": 815, + "Ġrem": 816, + "Th": 817, + "In": 818, + "Ġev": 819, + "day": 820, + "'re": 821, + "rib": 822, + "Ġrel": 823, + "ss": 824, + "Ġdef": 825, + "Ġright": 826, + "Ġsy": 827, + "),": 828, + "les": 829, + "000": 830, + "hen": 831, + "Ġthrough": 832, + "ĠTr": 833, + "__": 834, + "Ġway": 835, + "Ġdon": 836, + "Ġ,": 837, + "Ġ10": 838, + "ased": 839, + "Ġass": 840, + "ublic": 841, + "Ġreg": 842, + "ĠAnd": 843, + "ix": 844, + "Ġvery": 845, + "Ġinclud": 846, + "other": 847, + "Ġimp": 848, + "oth": 849, + "Ġsub": 850, + "ĠâĢĶ": 851, + "Ġbeing": 852, + "arg": 853, + "ĠWh": 854, + "==": 855, + "ible": 856, + "Ġdoes": 857, + "ange": 858, + "ram": 859, + "Ġ9": 860, + "ert": 861, + "ps": 862, + "ited": 863, + "ational": 864, + "Ġbr": 865, + "Ġdown": 866, + "Ġmany": 867, + "aking": 868, + "Ġcall": 869, + "uring": 870, + "ities": 871, + "Ġph": 872, + "ics": 873, + "als": 874, + "Ġdec": 875, + "ative": 876, + "ener": 877, + "Ġbefore": 878, + "ility": 879, + "Ġwell": 880, + "Ġmuch": 881, + "erson": 882, + "Ġthose": 883, + "Ġsuch": 884, + "Ġke": 885, + "Ġend": 886, + "ĠBut": 887, + "ason": 888, + "ting": 889, + "Ġlong": 890, + "ef": 891, + "Ġthink": 892, + "ys": 893, + "Ġbel": 894, + "Ġsm": 895, + "its": 896, + "ax": 897, + "Ġown": 898, + "Ġprov": 899, + "Ġset": 900, + "ife": 901, + "ments": 902, + "ble": 903, + "ward": 904, + "Ġshow": 905, + "Ġpres": 906, + "ms": 907, + "omet": 908, + "Ġob": 909, + "Ġsay": 910, + "ĠSh": 911, + "ts": 912, + "ful": 913, + "Ġeff": 914, + "Ġgu": 915, + "Ġinst": 916, + "und": 917, + "ren": 918, + "cess": 919, + "Ġent": 920, + "ĠYou": 921, + "Ġgood": 922, + "Ġstart": 923, + "ince": 924, + "Ġmade": 925, + "tt": 926, + "stem": 927, + "olog": 928, + "up": 929, + "Ġ|": 930, + "ump": 931, + "Ġhel": 932, + "vern": 933, + "ular": 934, + "ually": 935, + "Ġac": 936, + "Ġmon": 937, + "Ġlast": 938, + "Ġ200": 939, + "10": 940, + "Ġstud": 941, + "ures": 942, + "ĠAr": 943, + "self": 944, + "ars": 945, + "meric": 946, + "ues": 947, + "cy": 948, + "Ġmin": 949, + "ollow": 950, + "Ġcol": 951, + "io": 952, + "Ġmod": 953, + "Ġcount": 954, + "ĠCom": 955, + "hes": 956, + "Ġfin": 957, + "air": 958, + "ier": 959, + "âĢĶ": 960, + "read": 961, + "ank": 962, + "atch": 963, + "ever": 964, + "Ġstr": 965, + "Ġpoint": 966, + "ork": 967, + "ĠNew": 968, + "Ġsur": 969, + "ool": 970, + "alk": 971, + "ement": 972, + "Ġused": 973, + "ract": 974, + "ween": 975, + "Ġsame": 976, + "oun": 977, + "ĠAl": 978, + "ci": 979, + "Ġdiffere": 980, + "Ġwhile": 981, + "--------": 982, + "Ġgame": 983, + "cept": 984, + "Ġsim": 985, + "...": 986, + "Ġinter": 987, + "ek": 988, + "Ġreport": 989, + "Ġprodu": 990, + "Ġstill": 991, + "led": 992, + "ah": 993, + "Ġhere": 994, + "Ġworld": 995, + "Ġthough": 996, + "Ġnum": 997, + "arch": 998, + "imes": 999, + "ale": 1000, + "ĠSe": 1001, + "ĠIf": 1002, + "//": 1003, + "ĠLe": 1004, + "Ġret": 1005, + "Ġref": 1006, + "Ġtrans": 1007, + "ner": 1008, + "ution": 1009, + "ters": 1010, + "Ġtake": 1011, + "ĠCl": 1012, + "Ġconf": 1013, + "way": 1014, + "ave": 1015, + "Ġgoing": 1016, + "Ġsl": 1017, + "ug": 1018, + "ĠAmeric": 1019, + "Ġspec": 1020, + "Ġhand": 1021, + "Ġbetween": 1022, + "ists": 1023, + "ĠDe": 1024, + "oot": 1025, + "It": 1026, + "Ġear": 1027, + "Ġagainst": 1028, + "Ġhigh": 1029, + "gan": 1030, + "az": 1031, + "ather": 1032, + "Ġexp": 1033, + "Ġop": 1034, + "Ġins": 1035, + "Ġgr": 1036, + "Ġhelp": 
1037, + "Ġrequ": 1038, + "ets": 1039, + "ins": 1040, + "ĠPro": 1041, + "ism": 1042, + "Ġfound": 1043, + "land": 1044, + "ata": 1045, + "uss": 1046, + "ames": 1047, + "Ġperson": 1048, + "Ġgreat": 1049, + "pr": 1050, + "Ġsign": 1051, + "ĠAn": 1052, + "'ve": 1053, + "Ġsomet": 1054, + "Ġser": 1055, + "hip": 1056, + "Ġrun": 1057, + "Ġ:": 1058, + "Ġter": 1059, + "irect": 1060, + "Ġfollow": 1061, + "Ġdet": 1062, + "ices": 1063, + "Ġfind": 1064, + "12": 1065, + "Ġmem": 1066, + "Ġcr": 1067, + "ered": 1068, + "ex": 1069, + "Ġext": 1070, + "uth": 1071, + "ense": 1072, + "co": 1073, + "Ġteam": 1074, + "ving": 1075, + "ouse": 1076, + "ash": 1077, + "att": 1078, + "ved": 1079, + "Ġsystem": 1080, + "ĠAs": 1081, + "der": 1082, + "ives": 1083, + "min": 1084, + "Ġlead": 1085, + "ĠBl": 1086, + "cent": 1087, + "Ġaround": 1088, + "Ġgovern": 1089, + "Ġcur": 1090, + "velop": 1091, + "any": 1092, + "Ġcour": 1093, + "alth": 1094, + "ages": 1095, + "ize": 1096, + "Ġcar": 1097, + "ode": 1098, + "Ġlaw": 1099, + "Ġread": 1100, + "'m": 1101, + "con": 1102, + "Ġreal": 1103, + "Ġsupport": 1104, + "Ġ12": 1105, + "....": 1106, + "Ġreally": 1107, + "ness": 1108, + "Ġfact": 1109, + "Ġday": 1110, + "Ġboth": 1111, + "ying": 1112, + "Ġserv": 1113, + "ĠFor": 1114, + "Ġthree": 1115, + "Ġwom": 1116, + "Ġmed": 1117, + "ody": 1118, + "ĠThey": 1119, + "50": 1120, + "Ġexper": 1121, + "ton": 1122, + "Ġeach": 1123, + "akes": 1124, + "Ġche": 1125, + "Ġcre": 1126, + "ines": 1127, + "Ġrep": 1128, + "19": 1129, + "gg": 1130, + "illion": 1131, + "Ġgrou": 1132, + "ute": 1133, + "ik": 1134, + "We": 1135, + "get": 1136, + "ER": 1137, + "Ġmet": 1138, + "Ġsays": 1139, + "ox": 1140, + "Ġduring": 1141, + "ern": 1142, + "ized": 1143, + "ared": 1144, + "Ġfam": 1145, + "ically": 1146, + "Ġhapp": 1147, + "ĠIs": 1148, + "Ġchar": 1149, + "med": 1150, + "vent": 1151, + "Ġgener": 1152, + "ient": 1153, + "ple": 1154, + "iet": 1155, + "rent": 1156, + "11": 1157, + "ves": 1158, + "ption": 1159, + "Ġ20": 1160, + "formation": 1161, + "Ġcor": 1162, + "Ġoffic": 1163, + "ield": 1164, + "Ġtoo": 1165, + "ision": 1166, + "Ġinf": 1167, + "ĠZ": 1168, + "the": 1169, + "oad": 1170, + "Ġpublic": 1171, + "Ġprog": 1172, + "ric": 1173, + "**": 1174, + "Ġwar": 1175, + "Ġpower": 1176, + "view": 1177, + "Ġfew": 1178, + "Ġloc": 1179, + "Ġdifferent": 1180, + "Ġstate": 1181, + "Ġhead": 1182, + "'ll": 1183, + "Ġposs": 1184, + "Ġstat": 1185, + "ret": 1186, + "ants": 1187, + "Ġval": 1188, + "Ġiss": 1189, + "Ġcle": 1190, + "ivers": 1191, + "anc": 1192, + "Ġexpl": 1193, + "Ġanother": 1194, + "ĠQ": 1195, + "Ġav": 1196, + "thing": 1197, + "nce": 1198, + "Wh": 1199, + "Ġchild": 1200, + "Ġsince": 1201, + "ired": 1202, + "less": 1203, + "Ġlife": 1204, + "Ġdevelop": 1205, + "ittle": 1206, + "Ġdep": 1207, + "Ġpass": 1208, + "ãĥ": 1209, + "Ġturn": 1210, + "orn": 1211, + "This": 1212, + "bers": 1213, + "ross": 1214, + "ĠAd": 1215, + "Ġfr": 1216, + "Ġresp": 1217, + "Ġsecond": 1218, + "oh": 1219, + "Ġ/": 1220, + "Ġdisc": 1221, + "Ġ&": 1222, + "Ġsomething": 1223, + "Ġcomple": 1224, + "Ġed": 1225, + "Ġfil": 1226, + "Ġmonth": 1227, + "aj": 1228, + "uc": 1229, + "Ġgovernment": 1230, + "Ġwithout": 1231, + "Ġleg": 1232, + "Ġdist": 1233, + "Ġput": 1234, + "Ġquest": 1235, + "ann": 1236, + "Ġprot": 1237, + "20": 1238, + "Ġnever": 1239, + "ience": 1240, + "Ġlevel": 1241, + "Ġart": 1242, + "Ġthings": 1243, + "Ġmight": 1244, + "Ġeffect": 1245, + "Ġcontro": 1246, + "Ġcent": 1247, + "Ġ18": 1248, + "Ġallow": 1249, + "Ġbelie": 1250, + "chool": 1251, + "ott": 1252, + "Ġincre": 1253, + "Ġfeel": 1254, + "Ġresult": 
1255, + "Ġlot": 1256, + "Ġfun": 1257, + "ote": 1258, + "Ġty": 1259, + "erest": 1260, + "Ġcontin": 1261, + "Ġusing": 1262, + "Ġbig": 1263, + "201": 1264, + "Ġask": 1265, + "Ġbest": 1266, + "Ġ)": 1267, + "IN": 1268, + "Ġopp": 1269, + "30": 1270, + "Ġnumber": 1271, + "iness": 1272, + "St": 1273, + "lease": 1274, + "Ġca": 1275, + "Ġmust": 1276, + "Ġdirect": 1277, + "Ġgl": 1278, + "Ġ<": 1279, + "Ġopen": 1280, + "Ġpost": 1281, + "Ġcome": 1282, + "Ġseem": 1283, + "ording": 1284, + "Ġweek": 1285, + "ately": 1286, + "ital": 1287, + "Ġel": 1288, + "riend": 1289, + "Ġfar": 1290, + "Ġtra": 1291, + "inal": 1292, + "Ġpri": 1293, + "ĠUS": 1294, + "Ġplace": 1295, + "Ġform": 1296, + "Ġtold": 1297, + "\":": 1298, + "ains": 1299, + "ature": 1300, + "ĠTrump": 1301, + "Ġstand": 1302, + "Ġ#": 1303, + "ider": 1304, + "ĠFr": 1305, + "Ġnext": 1306, + "Ġsoc": 1307, + "Ġpur": 1308, + "Ġlet": 1309, + "Ġlittle": 1310, + "Ġhum": 1311, + "Ġi": 1312, + "ron": 1313, + "15": 1314, + "Ġ15": 1315, + "Ġcommun": 1316, + "Ġmark": 1317, + "ĠThere": 1318, + "Ġwr": 1319, + "ĠThat": 1320, + "Ġinformation": 1321, + "ways": 1322, + "Ġbus": 1323, + "app": 1324, + "Ġinvest": 1325, + "me": 1326, + "Ġhard": 1327, + "ained": 1328, + "ead": 1329, + "Ġimport": 1330, + "Ġappro": 1331, + "Ġtest": 1332, + "Ġtri": 1333, + "Ġrest": 1334, + "osed": 1335, + "Ġfull": 1336, + "Ġcare": 1337, + "ĠSp": 1338, + "Ġcase": 1339, + "ON": 1340, + "Ġsk": 1341, + "Ġless": 1342, + "Ġ+": 1343, + "Ġpartic": 1344, + "ĠPl": 1345, + "ably": 1346, + "uck": 1347, + "ished": 1348, + "chn": 1349, + "be": 1350, + "Ġlist": 1351, + "ator": 1352, + "Ġtop": 1353, + "Ġadv": 1354, + "ĠBe": 1355, + "ruct": 1356, + "Ġdem": 1357, + "ration": 1358, + "ling": 1359, + "gy": 1360, + "reen": 1361, + "ger": 1362, + "Ġhome": 1363, + "Ġleft": 1364, + "Ġbetter": 1365, + "Ġdata": 1366, + "Ġ11": 1367, + "Ġattack": 1368, + "Ġproble": 1369, + "line": 1370, + "ards": 1371, + "Ġbeh": 1372, + "ral": 1373, + "ĠHow": 1374, + "ĠShe": 1375, + "arge": 1376, + "Ġ--": 1377, + "://": 1378, + "Ġbro": 1379, + "ĠPh": 1380, + "ats": 1381, + "Ġbuild": 1382, + "ww": 1383, + "ided": 1384, + "aim": 1385, + "ases": 1386, + "ency": 1387, + "Ġmain": 1388, + "ined": 1389, + "Ġincluding": 1390, + "Ġ{": 1391, + "Ġgot": 1392, + "Ġinterest": 1393, + "Ġkeep": 1394, + "ĠX": 1395, + "Ġeas": 1396, + "aining": 1397, + "Ġclass": 1398, + "âĢ¦": 1399, + "ĠNo": 1400, + "Ġvar": 1401, + "Ġsmall": 1402, + "ample": 1403, + "AT": 1404, + "Ġide": 1405, + "ĠSo": 1406, + "Ġrece": 1407, + "Ġpolit": 1408, + "Ġmov": 1409, + "Ġplan": 1410, + "Ġpercent": 1411, + "iving": 1412, + "Ġcamp": 1413, + "Ġpay": 1414, + "14": 1415, + "sc": 1416, + "ised": 1417, + "Ġunt": 1418, + "oney": 1419, + "ploy": 1420, + "====": 1421, + "Ġdidn": 1422, + "ĠInd": 1423, + "els": 1424, + "ertain": 1425, + "Ġpos": 1426, + "____": 1427, + "iver": 1428, + "Ġprocess": 1429, + "Ġprogram": 1430, + "ified": 1431, + "ĠRep": 1432, + "16": 1433, + "uro": 1434, + "ology": 1435, + "atter": 1436, + "ina": 1437, + "Ġname": 1438, + "ĠAll": 1439, + "Ġfour": 1440, + "Ġreturn": 1441, + "vious": 1442, + "bs": 1443, + "Ġcalled": 1444, + "Ġmove": 1445, + "ĠSc": 1446, + "ird": 1447, + "Ġgroup": 1448, + "Ġbre": 1449, + "Ġmen": 1450, + "Ġcap": 1451, + "ten": 1452, + "ee": 1453, + "Ġdri": 1454, + "leg": 1455, + "here": 1456, + "uthor": 1457, + "Ġpat": 1458, + "Ġcurrent": 1459, + "ides": 1460, + "Ġpop": 1461, + "to": 1462, + "ention": 1463, + "Ġalways": 1464, + "Ġmil": 1465, + "Ġwomen": 1466, + "Ġ16": 1467, + "Ġold": 1468, + "iven": 1469, + "raph": 1470, + "ĠOr": 1471, + "ror": 1472, + 
"ently": 1473, + "Ġnear": 1474, + "ĠEx": 1475, + "ream": 1476, + "sh": 1477, + "Ġ14": 1478, + "Ġfree": 1479, + "ission": 1480, + "stand": 1481, + "ĠCon": 1482, + "ality": 1483, + "used": 1484, + "13": 1485, + "Ġdesign": 1486, + "Ġchange": 1487, + "Ġchang": 1488, + "Ġbo": 1489, + "Ġvis": 1490, + "ember": 1491, + "Ġbook": 1492, + "ready": 1493, + "Ġkill": 1494, + "25": 1495, + "pped": 1496, + "Ġaway": 1497, + "Ġable": 1498, + "Ġcountry": 1499, + "Ġconst": 1500, + "arn": 1501, + "Ġorder": 1502, + "AR": 1503, + "ior": 1504, + "ium": 1505, + "orth": 1506, + "18": 1507, + "ailable": 1508, + "Ġsw": 1509, + "Ġmillion": 1510, + "Ġ13": 1511, + "atic": 1512, + "ted": 1513, + "ĠGo": 1514, + "Ġoper": 1515, + "eng": 1516, + "Ġthing": 1517, + "ajor": 1518, + "conom": 1519, + "ĠComm": 1520, + "Ġwhy": 1521, + "ured": 1522, + "ural": 1523, + "Ġschool": 1524, + "by": 1525, + "ĠMar": 1526, + "Ġaff": 1527, + "Ġdays": 1528, + "Ġann": 1529, + "ush": 1530, + "ane": 1531, + "If": 1532, + "eg": 1533, + "Ġprof": 1534, + "Ġhealth": 1535, + "outh": 1536, + "But": 1537, + "ional": 1538, + ".,": 1539, + "Ġsol": 1540, + "Ġalready": 1541, + "Ġ30": 1542, + "Ġcharact": 1543, + "He": 1544, + "Ġfriend": 1545, + "ES": 1546, + "ians": 1547, + "icle": 1548, + "'d": 1549, + "ĠOn": 1550, + "Ġleast": 1551, + "Ġprom": 1552, + "Ġdr": 1553, + "Ġhist": 1554, + "ither": 1555, + "Ġest": 1556, + "iqu": 1557, + "17": 1558, + "son": 1559, + "Ġtell": 1560, + "Ġtalk": 1561, + "ohn": 1562, + "oint": 1563, + "lection": 1564, + "AN": 1565, + "Ġuntil": 1566, + "augh": 1567, + "Ġlater": 1568, + "Ġve": 1569, + "Ġview": 1570, + "ending": 1571, + "ived": 1572, + "Ġword": 1573, + "ware": 1574, + "Ġcost": 1575, + "Ġenough": 1576, + "Ġgive": 1577, + "ĠUnited": 1578, + "Ġtechn": 1579, + "arent": 1580, + "OR": 1581, + "Ġpar": 1582, + "ĠDr": 1583, + "Ġ2016": 1584, + "rist": 1585, + "ering": 1586, + "ĠÂ": 1587, + "Ġlarge": 1588, + "side": 1589, + "acy": 1590, + "ccess": 1591, + "Ġwin": 1592, + "Ġimportant": 1593, + "Ġ199": 1594, + "Ġdoesn": 1595, + "Ġ17": 1596, + "Ġbusiness": 1597, + "Ġclear": 1598, + "Ġrese": 1599, + "\",": 1600, + "ury": 1601, + "Ġequ": 1602, + "aster": 1603, + "alf": 1604, + "ĠAmerican": 1605, + "nect": 1606, + "Ġexpect": 1607, + "iversity": 1608, + "Ġocc": 1609, + "ĠFl": 1610, + "Ġkind": 1611, + "Ġmean": 1612, + "Ġpast": 1613, + "Ġdev": 1614, + "Ġbas": 1615, + "let": 1616, + "raft": 1617, + "Ġorgan": 1618, + "Ġdel": 1619, + "Ġperform": 1620, + "Ġstory": 1621, + "Ġseason": 1622, + "ĠCol": 1623, + "Ġclaim": 1624, + "Ġcame": 1625, + "Ġwithin": 1626, + "Ġline": 1627, + "Ġproject": 1628, + "ĠAt": 1629, + "Ġcontrol": 1630, + "ended": 1631, + "ĠSy": 1632, + "Ġair": 1633, + "ization": 1634, + "Ġ*": 1635, + "ley": 1636, + "Ġmoney": 1637, + "idd": 1638, + "You": 1639, + "for": 1640, + "Ġfamily": 1641, + "Ġmaking": 1642, + "Ġbit": 1643, + "Ġpolice": 1644, + "Ġhappen": 1645, + "Ġvers": 1646, + "ony": 1647, + "uff": 1648, + "ĠWhen": 1649, + "Ġsit": 1650, + "ideo": 1651, + "lf": 1652, + "ison": 1653, + "Ġsure": 1654, + "gin": 1655, + "Ġappear": 1656, + "Ġlight": 1657, + "Ġes": 1658, + "of": 1659, + "Ġwater": 1660, + "Ġtimes": 1661, + "not": 1662, + "Ġgrow": 1663, + "Ġcompany": 1664, + "ĠTe": 1665, + "ows": 1666, + "Ġmar": 1667, + "ource": 1668, + "iol": 1669, + "arm": 1670, + "br": 1671, + "Ġexample": 1672, + "Ġconc": 1673, + "Ġfore": 1674, + "ĠTo": 1675, + "pro": 1676, + "EN": 1677, + "ries": 1678, + "Ġ25": 1679, + "ĠCan": 1680, + "ney": 1681, + "Ġactually": 1682, + "Ġever": 1683, + "urity": 1684, + "aken": 1685, + "aps": 1686, + "Ġtax": 1687, + 
"Ġmajor": 1688, + "ama": 1689, + "Ġoften": 1690, + "eral": 1691, + "Ġhuman": 1692, + "Ġjob": 1693, + "ister": 1694, + "Ġavailable": 1695, + "ocr": 1696, + "enn": 1697, + "aid": 1698, + "ivid": 1699, + "Ġrecord": 1700, + "?\"": 1701, + "Ġsing": 1702, + "ĠAm": 1703, + "idence": 1704, + "Ġnews": 1705, + "ster": 1706, + "Ġeconom": 1707, + "Ġfollowing": 1708, + "ĠBr": 1709, + "ising": 1710, + "Ġhour": 1711, + "most": 1712, + "ument": 1713, + "Ġsex": 1714, + "Ġdesc": 1715, + "Ġbecome": 1716, + "ĠEd": 1717, + "Ġtook": 1718, + "Ġhaving": 1719, + "Ġproduct": 1720, + "ault": 1721, + "As": 1722, + "aring": 1723, + "Ġmeans": 1724, + "Ġhop": 1725, + "une": 1726, + "Ġcho": 1727, + "Ġcertain": 1728, + "Ġnon": 1729, + "Ġdeal": 1730, + "24": 1731, + "lement": 1732, + "oci": 1733, + "ene": 1734, + "Ġside": 1735, + "ĠPr": 1736, + "ĠMay": 1737, + "Ġreason": 1738, + "ued": 1739, + "ched": 1740, + "ulation": 1741, + "Ġelect": 1742, + "Ġofficial": 1743, + "Ġpossible": 1744, + "Ġhold": 1745, + "ands": 1746, + "ots": 1747, + "Ġcity": 1748, + "ories": 1749, + "Ġsever": 1750, + "Ġchildren": 1751, + "Ġonce": 1752, + "Ġactiv": 1753, + "ler": 1754, + "Ġnight": 1755, + "itions": 1756, + "ĠJohn": 1757, + "ape": 1758, + "play": 1759, + "Ġdone": 1760, + "Ġlim": 1761, + "Ġworking": 1762, + "ĠPres": 1763, + "orld": 1764, + "eb": 1765, + "ĠCo": 1766, + "Ġbody": 1767, + "ails": 1768, + "utes": 1769, + "ĠMr": 1770, + "Ġwhether": 1771, + "Ġauthor": 1772, + "rop": 1773, + "Ġproper": 1774, + "Ġseen": 1775, + ");": 1776, + "Ġfac": 1777, + "ĠSu": 1778, + "Ġcond": 1779, + "iting": 1780, + "Ġcourse": 1781, + "Ġ}": 1782, + "----------------": 1783, + "aign": 1784, + "Ġevent": 1785, + "Ġeng": 1786, + "Ġpot": 1787, + "Ġintern": 1788, + "iam": 1789, + "Ġshort": 1790, + "empt": 1791, + "ãĤ": 1792, + "ĠGod": 1793, + "ilar": 1794, + "80": 1795, + "Ġorig": 1796, + "IS": 1797, + "ourn": 1798, + "ability": 1799, + "itive": 1800, + "Ġdam": 1801, + "Ġ100": 1802, + "Ġpress": 1803, + "Ġdoing": 1804, + "Ġprotect": 1805, + "ring": 1806, + "Ġthought": 1807, + "Ġquestion": 1808, + "rew": 1809, + "ĠWar": 1810, + "Ġseveral": 1811, + "ĠState": 1812, + "Ġgiven": 1813, + "Ġfund": 1814, + "ĠTw": 1815, + "Ġwent": 1816, + "ances": 1817, + "work": 1818, + "por": 1819, + "my": 1820, + "40": 1821, + "Ġarg": 1822, + "artment": 1823, + "ustom": 1824, + "Ġpolic": 1825, + "Ġmeet": 1826, + "Ġcreat": 1827, + "22": 1828, + "ĠStates": 1829, + "Ġgames": 1830, + "raw": 1831, + "uture": 1832, + "Ġunderstand": 1833, + "urs": 1834, + "ĠOb": 1835, + "lish": 1836, + "sy": 1837, + "Ġmakes": 1838, + "Ġwon": 1839, + "agon": 1840, + "Ġhtt": 1841, + "Ġlove": 1842, + "ential": 1843, + "Ġcomplete": 1844, + "par": 1845, + "ĠIm": 1846, + "AL": 1847, + "Ġaccount": 1848, + "Âł": 1849, + "ored": 1850, + "vert": 1851, + "Ġident": 1852, + "Ġ2015": 1853, + "Ġothers": 1854, + "ĠMin": 1855, + "iber": 1856, + "verage": 1857, + "There": 1858, + "itional": 1859, + "dd": 1860, + "Ġprob": 1861, + "Ġyoung": 1862, + "Ġalong": 1863, + "Ġaccording": 1864, + "Ġyet": 1865, + "Ġmembers": 1866, + "ĠWhat": 1867, + "oid": 1868, + "ĠMan": 1869, + "And": 1870, + "Ġamong": 1871, + "ai": 1872, + "Ġemploy": 1873, + "ĠRes": 1874, + "Ġ>": 1875, + "Ġinvol": 1876, + "Ġlow": 1877, + "af": 1878, + "ĠCar": 1879, + "Ġhig": 1880, + "ĠOne": 1881, + "ĠSec": 1882, + "ination": 1883, + "Ġlikely": 1884, + "Ġant": 1885, + "aged": 1886, + "ĠRuss": 1887, + "Ġben": 1888, + "Ġrele": 1889, + "For": 1890, + "back": 1891, + "ĠNot": 1892, + "Ġpresident": 1893, + "ball": 1894, + "Ġaccess": 1895, + "ividual": 1896, + "ĠDem": 1897, + 
"ĠEuro": 1898, + "60": 1899, + "Ġknown": 1900, + "irl": 1901, + "ĠGr": 1902, + "Ġearly": 1903, + "use": 1904, + "iety": 1905, + "âĢĵ": 1906, + "Ġfight": 1907, + "Ġsent": 1908, + "Ġtoday": 1909, + "Ġmarket": 1910, + "\".": 1911, + "Ġbased": 1912, + "Ġstrong": 1913, + "urther": 1914, + "Ġdeb": 1915, + "mber": 1916, + "Ġproblem": 1917, + "Ġdeath": 1918, + "Ġsocial": 1919, + "imate": 1920, + "AS": 1921, + "ortun": 1922, + "Ġcampaign": 1923, + "ery": 1924, + "Ch": 1925, + "Ġey": 1926, + "ially": 1927, + "Ġmus": 1928, + "wh": 1929, + "pos": 1930, + "Ġer": 1931, + "Ġsaf": 1932, + "Ġmonths": 1933, + "iron": 1934, + "Ġviol": 1935, + "Ġfive": 1936, + "Ġstre": 1937, + "Ġplayers": 1938, + "inc": 1939, + "ald": 1940, + "year": 1941, + "aun": 1942, + "Ġsuccess": 1943, + "Ġpresent": 1944, + "erence": 1945, + "Ġ2014": 1946, + "Ġsugg": 1947, + "Ġparticular": 1948, + "Ġtry": 1949, + "Ġsuggest": 1950, + "ĠChrist": 1951, + "ones": 1952, + "Ġpriv": 1953, + "23": 1954, + "Ġcrit": 1955, + "Ġland": 1956, + "Ġlocal": 1957, + "ify": 1958, + "29": 1959, + "Ġaut": 1960, + "ED": 1961, + "ĠGu": 1962, + "Ġmult": 1963, + "Ġpolitical": 1964, + "Ġasked": 1965, + "Ġformer": 1966, + "itter": 1967, + "ript": 1968, + "Ġclose": 1969, + "Ġpract": 1970, + "ĠYork": 1971, + "Ġgetting": 1972, + "Ġacross": 1973, + "Ġcomb": 1974, + "Ġbelieve": 1975, + "Ġz": 1976, + "Ġtoget": 1977, + "Ġtogether": 1978, + "ĠCent": 1979, + "irc": 1980, + "Ġindividual": 1981, + "ĠMc": 1982, + "27": 1983, + "isk": 1984, + "ĠEng": 1985, + "Ġface": 1986, + "Ġ24": 1987, + "Ġvalue": 1988, + "Ġarea": 1989, + "ev": 1990, + "Ġwrit": 1991, + "ĠPresident": 1992, + "Ġvot": 1993, + "Ġkey": 1994, + "Ġmom": 1995, + "put": 1996, + "Ġanything": 1997, + "Ġexperience": 1998, + "attle": 1999, + "Ġmind": 2000, + "aff": 2001, + "omm": 2002, + "Ġfuture": 2003, + "ged": 2004, + "Ġcut": 2005, + "Ġtot": 2006, + "itch": 2007, + "Ġvideo": 2008, + "Ġinvestig": 2009, + "Ġnet": 2010, + "ĠMy": 2011, + "rict": 2012, + "ien": 2013, + ".)": 2014, + "Ġimpro": 2015, + "though": 2016, + "wards": 2017, + "Ġconnect": 2018, + "ĠMed": 2019, + "selves": 2020, + "ensive": 2021, + "mb": 2022, + "ober": 2023, + "ators": 2024, + "An": 2025, + "Ġ50": 2026, + "Ġredu": 2027, + "resent": 2028, + "Ġabove": 2029, + "Ġfre": 2030, + "ĠEurope": 2031, + "sw": 2032, + "Ġamount": 2033, + "ĠApp": 2034, + "Ġeither": 2035, + "Ġmilit": 2036, + "Ġanal": 2037, + "Ġfail": 2038, + "ĠEn": 2039, + "ales": 2040, + "Ġspecial": 2041, + "Ġblack": 2042, + "IT": 2043, + "cher": 2044, + "Ġlooking": 2045, + "Ġfire": 2046, + "yn": 2047, + "Ġalmost": 2048, + "oon": 2049, + "Ġstudy": 2050, + "Ġmiss": 2051, + "ches": 2052, + "rown": 2053, + "Ġtre": 2054, + "Ġcommunity": 2055, + "Ġmedia": 2056, + "Ġfood": 2057, + "Ġcomes": 2058, + "ĠUniversity": 2059, + "Ġsingle": 2060, + "What": 2061, + "uly": 2062, + "Ġhalf": 2063, + "ague": 2064, + "hod": 2065, + "ĠRepublic": 2066, + "Ġstarted": 2067, + "Ġquick": 2068, + "oto": 2069, + "book": 2070, + "Ġissue": 2071, + "itor": 2072, + "Ġelse": 2073, + "Ġconsider": 2074, + "26": 2075, + "rodu": 2076, + "Ġtaken": 2077, + "28": 2078, + "99": 2079, + "ĠWith": 2080, + "Ġtrue": 2081, + "Ġwa": 2082, + "Ġtrad": 2083, + "Ġago": 2084, + "Ġmess": 2085, + "ief": 2086, + "Ġadded": 2087, + "oke": 2088, + "Ġbad": 2089, + "Ġfav": 2090, + "33": 2091, + "Ġsimilar": 2092, + "ask": 2093, + "ĠDon": 2094, + "Ġcharacter": 2095, + "orts": 2096, + "ĠHouse": 2097, + "Ġreported": 2098, + "Ġtype": 2099, + "val": 2100, + "iod": 2101, + "ĠHowever": 2102, + "Ġtarg": 2103, + "Ġentire": 2104, + "pping": 2105, + "Ġhistory": 2106, + 
"Ġlive": 2107, + "ffic": 2108, + "........": 2109, + "ederal": 2110, + "Ġtrying": 2111, + "Ġdiscuss": 2112, + "ĠHar": 2113, + "aces": 2114, + "lished": 2115, + "Ġself": 2116, + "osp": 2117, + "rest": 2118, + "Ġroom": 2119, + "elt": 2120, + "Ġfall": 2121, + "olution": 2122, + "Ġet": 2123, + "Ġx": 2124, + "Ġisn": 2125, + "Ġidea": 2126, + "bo": 2127, + "Ġsound": 2128, + "ĠDep": 2129, + "Ġsomeone": 2130, + "cially": 2131, + "ully": 2132, + "Ġfoc": 2133, + "Ġobject": 2134, + "ift": 2135, + "aper": 2136, + "Ġplayer": 2137, + "Ġrather": 2138, + "Ġservice": 2139, + "ashing": 2140, + "ĠDo": 2141, + "ĠPart": 2142, + "rug": 2143, + "mon": 2144, + "ply": 2145, + "Ġmor": 2146, + "Ġnothing": 2147, + "Ġprovide": 2148, + "IC": 2149, + "ung": 2150, + "Ġparty": 2151, + "Ġexist": 2152, + "Ġmag": 2153, + "70": 2154, + "Ġrul": 2155, + "Ġhouse": 2156, + "Ġbehind": 2157, + "Ġhowever": 2158, + "ĠWorld": 2159, + "Ġsum": 2160, + "Ġapplic": 2161, + "Ġ;": 2162, + "Ġfunction": 2163, + "gr": 2164, + "ĠPol": 2165, + "Ġfront": 2166, + "200": 2167, + "Ġseries": 2168, + "Ġtem": 2169, + "Ġtyp": 2170, + "ills": 2171, + "Ġopt": 2172, + "Ġpoints": 2173, + "Ġbelow": 2174, + "itted": 2175, + "Ġspecific": 2176, + "Ġ2017": 2177, + "umb": 2178, + "Ġra": 2179, + "Ġprevious": 2180, + "Ġpret": 2181, + "reme": 2182, + "Ġcustom": 2183, + "Ġcourt": 2184, + "ĠMe": 2185, + "Ġrepl": 2186, + "Ġwhole": 2187, + "go": 2188, + "cer": 2189, + "Ġtreat": 2190, + "ĠAct": 2191, + "Ġprobably": 2192, + "Ġlearn": 2193, + "ender": 2194, + "ĠAss": 2195, + "Ġversion": 2196, + "now": 2197, + "Ġcheck": 2198, + "ĠCal": 2199, + "RE": 2200, + "minist": 2201, + "On": 2202, + "ources": 2203, + "Ġbenef": 2204, + "Ġdoc": 2205, + "Ġdeter": 2206, + "Ġenc": 2207, + "Ġsuper": 2208, + "Ġaddress": 2209, + "Ġvict": 2210, + "Ġ2013": 2211, + "Ġmeas": 2212, + "tr": 2213, + "Ġfield": 2214, + "When": 2215, + "Ġsignific": 2216, + "uge": 2217, + "Ġfeat": 2218, + "Ġcommon": 2219, + "load": 2220, + "Ġbegin": 2221, + "Ġbring": 2222, + "Ġaction": 2223, + "erman": 2224, + "Ġdescrib": 2225, + "Ġindust": 2226, + "Ġwanted": 2227, + "ried": 2228, + "ming": 2229, + "Ġattempt": 2230, + "45": 2231, + "fer": 2232, + "Ġdue": 2233, + "ression": 2234, + "##": 2235, + "Ġshall": 2236, + "Ġsix": 2237, + "oo": 2238, + "Ġstep": 2239, + "Ġpub": 2240, + "Ġhimself": 2241, + "Ġ23": 2242, + "Ġcop": 2243, + "Ġdest": 2244, + "Ġstop": 2245, + "AC": 2246, + "ibility": 2247, + "Ġlab": 2248, + "icult": 2249, + "Ġhours": 2250, + "Ġcreate": 2251, + "Ġfurther": 2252, + "ĠAmerica": 2253, + "ĠCity": 2254, + "Ġdou": 2255, + "head": 2256, + "ST": 2257, + "ĠNorth": 2258, + "cing": 2259, + "Ġnational": 2260, + "ule": 2261, + "ĠInst": 2262, + "Ġtaking": 2263, + "ĠQu": 2264, + "irt": 2265, + "Ġred": 2266, + "Ġresearch": 2267, + "viron": 2268, + "ĠGe": 2269, + "Ġbreak": 2270, + "ana": 2271, + "Ġspace": 2272, + "aterial": 2273, + "Ġrecent": 2274, + "ĠAb": 2275, + "Ġgeneral": 2276, + "Ġhit": 2277, + "Ġperiod": 2278, + "Ġeverything": 2279, + "ively": 2280, + "Ġphys": 2281, + "Ġsaying": 2282, + "anks": 2283, + "Ġcou": 2284, + "Ġcult": 2285, + "aced": 2286, + "eal": 2287, + "uation": 2288, + "Ġcoun": 2289, + "lu": 2290, + "Ġinclude": 2291, + "Ġposition": 2292, + "ĠAfter": 2293, + "ĠCanad": 2294, + "ĠEm": 2295, + "Ġimm": 2296, + "ĠRed": 2297, + "Ġpick": 2298, + "Ġcompl": 2299, + "Ġmatter": 2300, + "reg": 2301, + "ext": 2302, + "angu": 2303, + "isc": 2304, + "ole": 2305, + "aut": 2306, + "Ġcompet": 2307, + "eed": 2308, + "fect": 2309, + "Ġ21": 2310, + "ĠSen": 2311, + "ĠThese": 2312, + "asing": 2313, + "Ġcannot": 2314, + "Ġinit": 
2315, + "Ġrelations": 2316, + "ached": 2317, + "Ġbar": 2318, + "Ġ40": 2319, + "ĠTH": 2320, + "Ġ2012": 2321, + "Ġvol": 2322, + "Ġground": 2323, + "Ġsecurity": 2324, + "Ġupd": 2325, + "ilt": 2326, + "35": 2327, + "Ġconcern": 2328, + "ĠJust": 2329, + "Ġwhite": 2330, + "Ġseems": 2331, + "ĠHer": 2332, + "pecially": 2333, + "ients": 2334, + "Ġannoun": 2335, + "Ġfig": 2336, + "ights": 2337, + "Ġstri": 2338, + "like": 2339, + "ids": 2340, + "Ġsus": 2341, + "Ġwatch": 2342, + "Ġâ": 2343, + "Ġwind": 2344, + "ĠCont": 2345, + "Ġitself": 2346, + "Ġmass": 2347, + "Al": 2348, + "yle": 2349, + "ique": 2350, + "ĠNational": 2351, + "Ġabs": 2352, + "Ġpack": 2353, + "Ġoutside": 2354, + "Ġanim": 2355, + "Ġpain": 2356, + "eter": 2357, + "Ġmanag": 2358, + "duct": 2359, + "ogn": 2360, + "Ġ]": 2361, + "ĠSept": 2362, + "sec": 2363, + "off": 2364, + "ĠJan": 2365, + "Ġfoot": 2366, + "ades": 2367, + "Ġthird": 2368, + "Ġmot": 2369, + "Ġevidence": 2370, + "inton": 2371, + "Ġthreat": 2372, + "apt": 2373, + "ples": 2374, + "cle": 2375, + "Ġlo": 2376, + "Ġdecl": 2377, + "Ġitem": 2378, + "medi": 2379, + "Ġrepresent": 2380, + "omb": 2381, + "amer": 2382, + "Ġsignificant": 2383, + "ograph": 2384, + "su": 2385, + "Ġcal": 2386, + "ires": 2387, + "0000": 2388, + "ID": 2389, + "AM": 2390, + "Ġsimply": 2391, + "Ġlonger": 2392, + "Ġfile": 2393, + "OT": 2394, + "che": 2395, + "So": 2396, + "ateg": 2397, + "org": 2398, + "ĠHis": 2399, + "Ġener": 2400, + "Ġdom": 2401, + "Ġupon": 2402, + "ili": 2403, + "\":\"": 2404, + "Ġthemselves": 2405, + "Ġcoming": 2406, + "Ġquite": 2407, + "Ġdifficult": 2408, + "ĠBar": 2409, + "ilities": 2410, + "rel": 2411, + "ends": 2412, + "cial": 2413, + "64": 2414, + "Ġwoman": 2415, + "rap": 2416, + "yr": 2417, + "Ġnecess": 2418, + "ips": 2419, + "Ġtext": 2420, + "Ġrequire": 2421, + "Ġmilitary": 2422, + "Ġreview": 2423, + "Ġrespons": 2424, + "75": 2425, + "Ġsubject": 2426, + "Ġinstead": 2427, + "Ġissues": 2428, + "Ġgen": 2429, + "\",\"": 2430, + "Ġminutes": 2431, + "Ġweap": 2432, + "ray": 2433, + "amed": 2434, + "time": 2435, + "bl": 2436, + "How": 2437, + "Ġcode": 2438, + "ĠSm": 2439, + "Ġhigher": 2440, + "ĠSte": 2441, + "ris": 2442, + "Ġpage": 2443, + "Ġstudents": 2444, + "ĠIntern": 2445, + "Ġmethod": 2446, + "ĠAug": 2447, + "ĠPer": 2448, + "ĠAg": 2449, + "Ġpolicy": 2450, + "ĠSw": 2451, + "Ġexec": 2452, + "Ġaccept": 2453, + "ume": 2454, + "ribut": 2455, + "Ġwords": 2456, + "Ġfinal": 2457, + "Ġchanges": 2458, + "ĠDemocr": 2459, + "Ġfriends": 2460, + "Ġrespect": 2461, + "Ġep": 2462, + "Ġcompan": 2463, + "ivil": 2464, + "Ġdamage": 2465, + "****": 2466, + "ogle": 2467, + "vironment": 2468, + "Ġneg": 2469, + "ental": 2470, + "Ġap": 2471, + "Ġtotal": 2472, + "ival": 2473, + "!\"": 2474, + "lim": 2475, + "Ġneeds": 2476, + "Ġagre": 2477, + "Ġdevelopment": 2478, + "Ġage": 2479, + "iple": 2480, + "21": 2481, + "Ġresults": 2482, + "ĠAf": 2483, + "Sh": 2484, + "Ġgun": 2485, + "ĠObama": 2486, + "roll": 2487, + "Ġ@": 2488, + "Ġrights": 2489, + "ĠBrit": 2490, + "Ġrunning": 2491, + "Ġwasn": 2492, + "Ġport": 2493, + "Ġrate": 2494, + "Ġpretty": 2495, + "Ġtarget": 2496, + "Ġsaw": 2497, + "Ġcirc": 2498, + "Ġworks": 2499, + "icro": 2500, + "alt": 2501, + "over": 2502, + "www": 2503, + "That": 2504, + "lier": 2505, + "Ġeveryone": 2506, + "ude": 2507, + "Ġpie": 2508, + "iddle": 2509, + "rael": 2510, + "Ġrad": 2511, + "Ġblock": 2512, + "Ġwalk": 2513, + "To": 2514, + "ãģ": 2515, + "nes": 2516, + "ĠAust": 2517, + "aul": 2518, + "rote": 2519, + "ĠSouth": 2520, + "ession": 2521, + "oph": 2522, + "Ġshows": 2523, + "Ġsite": 2524, + 
"Ġjo": 2525, + "Ġrisk": 2526, + "clus": 2527, + "lt": 2528, + "Ġinj": 2529, + "iding": 2530, + "ĠSpe": 2531, + "Ġchall": 2532, + "irm": 2533, + "Ġ22": 2534, + "itting": 2535, + "str": 2536, + "Ġhy": 2537, + "LE": 2538, + "key": 2539, + "Ġbegan": 2540, + "atur": 2541, + "ashington": 2542, + "lam": 2543, + "ĠDav": 2544, + "bit": 2545, + "Ġsize": 2546, + "ĠPar": 2547, + "38": 2548, + "ournal": 2549, + "face": 2550, + "Ġdecision": 2551, + "Ġlarg": 2552, + "Ġjud": 2553, + "rect": 2554, + "Ġcontinue": 2555, + "ĠOct": 2556, + "overed": 2557, + "ĠInt": 2558, + "========": 2559, + "Ġparent": 2560, + "ĠWill": 2561, + "Ġeasy": 2562, + "Ġdrug": 2563, + "anger": 2564, + "Ġsense": 2565, + "Ġdi": 2566, + "iday": 2567, + "Ġenergy": 2568, + "istic": 2569, + "Ġassoci": 2570, + "arter": 2571, + "obal": 2572, + "eks": 2573, + "ĠEl": 2574, + "urch": 2575, + "Ġgirl": 2576, + "oe": 2577, + "itle": 2578, + "Ġ28": 2579, + "ĠChe": 2580, + "Ġrequest": 2581, + "Ġsoon": 2582, + "Ġhost": 2583, + "ky": 2584, + "Ġstates": 2585, + "omes": 2586, + "Ġmaterial": 2587, + "lex": 2588, + "Ġmoment": 2589, + "Ġansw": 2590, + "onse": 2591, + "Ġespecially": 2592, + "Ġnorm": 2593, + "Ġservices": 2594, + "pite": 2595, + "ran": 2596, + "Ġrole": 2597, + "44": 2598, + "):": 2599, + "Ġcred": 2600, + "Cl": 2601, + "________": 2602, + "Ġmat": 2603, + "Ġlog": 2604, + "ĠClinton": 2605, + "OU": 2606, + "Ġoffice": 2607, + "Ġ26": 2608, + "Ġcharg": 2609, + "Ġtrack": 2610, + "ma": 2611, + "Ġheart": 2612, + "Ġball": 2613, + "Ġpersonal": 2614, + "Ġbuilding": 2615, + "na": 2616, + "set": 2617, + "body": 2618, + "ĠBlack": 2619, + "Ġincrease": 2620, + "itten": 2621, + "Ġneeded": 2622, + "36": 2623, + "32": 2624, + "=\"": 2625, + "Ġlost": 2626, + "Ġbecame": 2627, + "Ġgroups": 2628, + "ĠMus": 2629, + "Ġwrote": 2630, + "ĠPe": 2631, + "Ġprop": 2632, + "joy": 2633, + "é": 2634, + "ĠWhite": 2635, + "Ġdead": 2636, + ".'": 2637, + "Ġhttp": 2638, + "Ġwebs": 2639, + "OS": 2640, + "Ġinside": 2641, + "Ġwrong": 2642, + "Ġstatement": 2643, + "Ġ...": 2644, + "yl": 2645, + "Ġfilm": 2646, + "Ġmusic": 2647, + "Ġshare": 2648, + "ification": 2649, + "Ġrelease": 2650, + "Ġforward": 2651, + "Ġstay": 2652, + "Ġcomput": 2653, + "itte": 2654, + "ser": 2655, + "Ġoriginal": 2656, + "Ġcard": 2657, + "Ġcand": 2658, + "Ġdiv": 2659, + "atural": 2660, + "Ġfavor": 2661, + "OM": 2662, + "Ġcases": 2663, + "uses": 2664, + "Ġsection": 2665, + "Ġleave": 2666, + "ging": 2667, + "oved": 2668, + "ĠWashington": 2669, + "39": 2670, + "ĠGl": 2671, + "Ġrequired": 2672, + "action": 2673, + "apan": 2674, + "oor": 2675, + "iter": 2676, + "ĠKing": 2677, + "Ġcountries": 2678, + "ĠGerman": 2679, + "lling": 2680, + "Ġ27": 2681, + "34": 2682, + "Ġquestions": 2683, + "Ġprim": 2684, + "Ġcell": 2685, + "Ġshoot": 2686, + "Ġanyone": 2687, + "ĠWest": 2688, + "Ġaffect": 2689, + "epend": 2690, + "Ġonline": 2691, + "ĠIsrael": 2692, + "ĠSeptember": 2693, + "Ġability": 2694, + "Ġcontent": 2695, + "ises": 2696, + "Ġreve": 2697, + "Ġlaun": 2698, + "Ġindic": 2699, + "Ġforce": 2700, + "cast": 2701, + "Ġsold": 2702, + "aving": 2703, + "fl": 2704, + "Ġsoft": 2705, + "Ġcompanies": 2706, + "ceed": 2707, + "Ġarticle": 2708, + "Ġaud": 2709, + "Ġrev": 2710, + "Ġeduc": 2711, + "Ġplaying": 2712, + "05": 2713, + "Ġheld": 2714, + "ctor": 2715, + "Ġreleased": 2716, + "Ġfederal": 2717, + "37": 2718, + "Ġadminist": 2719, + "Ġinterview": 2720, + "Ġinstall": 2721, + "Ġreceived": 2722, + "Ġsource": 2723, + "uk": 2724, + "Ph": 2725, + "Ġserious": 2726, + "Ġcreated": 2727, + "Ġcause": 2728, + "Ġimmedi": 2729, + "Ġdefin": 2730, + "uel": 
2731, + "ĠDepartment": 2732, + "ctions": 2733, + "ĠCour": 2734, + "ĠNow": 2735, + "ze": 2736, + "ites": 2737, + "itution": 2738, + "Ġlate": 2739, + "Ġspeak": 2740, + "ners": 2741, + "Ġlegal": 2742, + "ari": 2743, + "ĠCor": 2744, + "Ġweeks": 2745, + "Ġmodel": 2746, + "Ġpred": 2747, + "Ġexact": 2748, + "BC": 2749, + "ĠBy": 2750, + "ING": 2751, + "osing": 2752, + "Ġtakes": 2753, + "Ġregard": 2754, + "Ġopportun": 2755, + "Ġprice": 2756, + "Ġ198": 2757, + "ĠApr": 2758, + "fully": 2759, + "Ġord": 2760, + "Ġproblems": 2761, + "ruction": 2762, + "ham": 2763, + "ĠCount": 2764, + "lege": 2765, + "Ġleaders": 2766, + "ET": 2767, + "lev": 2768, + "Ġdeep": 2769, + "ological": 2770, + "ese": 2771, + "haps": 2772, + "ĠSome": 2773, + "Ġpers": 2774, + "Ġcontract": 2775, + "Ġrelationship": 2776, + "sp": 2777, + "oud": 2778, + "Ġbase": 2779, + "48": 2780, + "mit": 2781, + "Ad": 2782, + "ancial": 2783, + "Ġconsum": 2784, + "Ġpotential": 2785, + "Ġlangu": 2786, + "rem": 2787, + "eth": 2788, + "Ġrelig": 2789, + "ressed": 2790, + "66": 2791, + "Ġlink": 2792, + "Ġlower": 2793, + "ayer": 2794, + "ĠJune": 2795, + "Ġfem": 2796, + "unt": 2797, + "erc": 2798, + "urd": 2799, + "Ġcontact": 2800, + "Ġill": 2801, + "Ġmother": 2802, + "Ġestab": 2803, + "htt": 2804, + "ĠMarch": 2805, + "ĠBro": 2806, + "ĠChina": 2807, + "Ġ29": 2808, + "Ġsqu": 2809, + "Ġprovided": 2810, + "Ġaverage": 2811, + "asons": 2812, + "Ġ2011": 2813, + "Ġexam": 2814, + "lin": 2815, + "55": 2816, + "ned": 2817, + "Ġperfect": 2818, + "Ġtou": 2819, + "alse": 2820, + "ux": 2821, + "Ġbuy": 2822, + "Ġshot": 2823, + "Ġcollect": 2824, + "Ġphot": 2825, + "Ġplayed": 2826, + "Ġsurpr": 2827, + "Ġofficials": 2828, + "Ġsimple": 2829, + "avy": 2830, + "Ġindustry": 2831, + "Ġhands": 2832, + "ground": 2833, + "Ġpull": 2834, + "Ġround": 2835, + "Ġuser": 2836, + "Ġrange": 2837, + "uary": 2838, + "Ġprivate": 2839, + "ops": 2840, + "ees": 2841, + "Ġways": 2842, + "ĠMich": 2843, + "Ġveh": 2844, + "Ġexcept": 2845, + "Ġterms": 2846, + "imum": 2847, + "pper": 2848, + "ION": 2849, + "ores": 2850, + "ĠDragon": 2851, + "oul": 2852, + "Ġden": 2853, + "Ġperformance": 2854, + "Ġbill": 2855, + "cil": 2856, + "47": 2857, + "Ġenvironment": 2858, + "Ġexc": 2859, + "add": 2860, + "Ġworth": 2861, + "Ġpict": 2862, + "Ġchance": 2863, + "Ġ2018": 2864, + "bor": 2865, + "Ġspeed": 2866, + "iction": 2867, + "Ġalleg": 2868, + "ĠJapan": 2869, + "atory": 2870, + "reet": 2871, + "Ġmatch": 2872, + "ĠII": 2873, + "Ġstru": 2874, + "order": 2875, + "Ġste": 2876, + "Ġliving": 2877, + "Ġstruct": 2878, + "ino": 2879, + "Ġsepar": 2880, + "hern": 2881, + "Ġresponse": 2882, + "Ġenjoy": 2883, + "Ġvia": 2884, + "AD": 2885, + "uments": 2886, + "acebook": 2887, + "Ġmember": 2888, + "ibr": 2889, + "izing": 2890, + "Ġtool": 2891, + "ĠMon": 2892, + "ĠWhile": 2893, + "hood": 2894, + "ĠAng": 2895, + "ĠDef": 2896, + "Ġoffer": 2897, + "Tr": 2898, + "aur": 2899, + "Ġturned": 2900, + "ĠJuly": 2901, + "down": 2902, + "anced": 2903, + "Ġrecently": 2904, + "ĠEar": 2905, + "Ġce": 2906, + "ĠStar": 2907, + "ĠCong": 2908, + "rought": 2909, + "Ġblood": 2910, + "Ġhope": 2911, + "Ġcomment": 2912, + "aint": 2913, + "Ġarri": 2914, + "iles": 2915, + "Ġparticip": 2916, + "ought": 2917, + "ription": 2918, + "08": 2919, + "49": 2920, + "Ġgave": 2921, + "Ġselect": 2922, + "Ġkilled": 2923, + "sych": 2924, + "Ġgoes": 2925, + "ij": 2926, + "Ġcoll": 2927, + "Ġimpact": 2928, + "atives": 2929, + "ĠSer": 2930, + "09": 2931, + "ĠAugust": 2932, + "Ġboy": 2933, + "de": 2934, + "ĠDes": 2935, + "Ġfelt": 2936, + "US": 2937, + "Ġexpected": 2938, + 
"Ġimage": 2939, + "ĠMark": 2940, + "ccording": 2941, + "oice": 2942, + "EC": 2943, + "ĠMag": 2944, + "ened": 2945, + "hold": 2946, + "ĠPost": 2947, + "Ġprevent": 2948, + "No": 2949, + "Ġinvolved": 2950, + "Ġeyes": 2951, + "Ġquickly": 2952, + "At": 2953, + "unk": 2954, + "Ġbehav": 2955, + "Ġur": 2956, + "Ġled": 2957, + "come": 2958, + "ey": 2959, + "Ġcandid": 2960, + "Ġearlier": 2961, + "Ġfocus": 2962, + "ety": 2963, + "Pro": 2964, + "ledge": 2965, + "ixed": 2966, + "illed": 2967, + "Ġpopular": 2968, + "AP": 2969, + "Ġsett": 2970, + "light": 2971, + "Ġvarious": 2972, + "inks": 2973, + "Ġlevels": 2974, + "Ġroad": 2975, + "ellig": 2976, + "ables": 2977, + "hel": 2978, + "ittee": 2979, + "ĠGener": 2980, + "ype": 2981, + "Ġheard": 2982, + "icles": 2983, + "Ġmis": 2984, + "Ġusers": 2985, + "ĠSan": 2986, + "Ġimprove": 2987, + "Ġfather": 2988, + "Ġsearch": 2989, + "They": 2990, + "vil": 2991, + "Ġprofess": 2992, + "Ġknew": 2993, + "Ġloss": 2994, + "Ġevents": 2995, + "65": 2996, + "Ġbillion": 2997, + "07": 2998, + "02": 2999, + "ĠNews": 3000, + "ĠAM": 3001, + "Ġcover": 3002, + "where": 3003, + "ension": 3004, + "Ġbott": 3005, + "Ġareas": 3006, + "ences": 3007, + "ope": 3008, + "ĠTwitter": 3009, + "ael": 3010, + "Ġgets": 3011, + "ĠGoogle": 3012, + "Ġsn": 3013, + "iant": 3014, + "Ġvote": 3015, + "Ġnearly": 3016, + "Ġincluded": 3017, + "Ġrecogn": 3018, + "zz": 3019, + "mm": 3020, + "aled": 3021, + "Ġhappened": 3022, + "04": 3023, + "Ġhot": 3024, + "Ġwhose": 3025, + "Ġcivil": 3026, + "Ġsuff": 3027, + "oes": 3028, + "itiz": 3029, + "ĠSyri": 3030, + "Ġrespond": 3031, + "Ġhon": 3032, + "Ġfeatures": 3033, + "Ġeconomic": 3034, + "ĠApril": 3035, + "rim": 3036, + "Ġtechnology": 3037, + "Ġoption": 3038, + "aging": 3039, + "Ġpurch": 3040, + "Re": 3041, + "Ġlat": 3042, + "chie": 3043, + "isl": 3044, + "Ġrecomm": 3045, + "uf": 3046, + "Ġtraining": 3047, + "Ġeffects": 3048, + "Ġfast": 3049, + "Ġ2010": 3050, + "Ġoccur": 3051, + "Ġwebsite": 3052, + "Ġemail": 3053, + "Ġsens": 3054, + "ech": 3055, + "Ġoil": 3056, + "Ġinflu": 3057, + "Ġcurrently": 3058, + "ĠSch": 3059, + "ĠAdd": 3060, + "Ġgoal": 3061, + "Ġscient": 3062, + "Ġconv": 3063, + "100": 3064, + "emy": 3065, + "Ġdecided": 3066, + "Ġtravel": 3067, + "Ġmention": 3068, + "LL": 3069, + "03": 3070, + "Ġelection": 3071, + "Ġphone": 3072, + "Ġlooks": 3073, + "Ġsituation": 3074, + "Ġcy": 3075, + "Ġhor": 3076, + "bed": 3077, + "ĠCourt": 3078, + "aily": 3079, + "aves": 3080, + "Ġquality": 3081, + "ĠComp": 3082, + "wise": 3083, + "Ġtable": 3084, + "Ġstaff": 3085, + "ĠWind": 3086, + "ett": 3087, + "Ġtried": 3088, + "idered": 3089, + "Ġaddition": 3090, + "Ġbox": 3091, + "Ġlack": 3092, + "arily": 3093, + "Ġwide": 3094, + "Ġmid": 3095, + "Ġboard": 3096, + "ysis": 3097, + "Ġanti": 3098, + "ha": 3099, + "Ġdig": 3100, + "ening": 3101, + "Ġdro": 3102, + "Con": 3103, + "68": 3104, + "Ġslow": 3105, + "based": 3106, + "sequ": 3107, + "Ġpath": 3108, + "Ex": 3109, + "aker": 3110, + "Ġworked": 3111, + "Ġpen": 3112, + "Ġengine": 3113, + "Ġlooked": 3114, + "ĠSuper": 3115, + "ĠServ": 3116, + "Ġvictim": 3117, + "Un": 3118, + "Ġproperty": 3119, + "Ġintrodu": 3120, + "Ġexecut": 3121, + "ĠPM": 3122, + "Le": 3123, + "Ġcolor": 3124, + "ĠMore": 3125, + "Ġ60": 3126, + "Ġnetwork": 3127, + "Ġdate": 3128, + "cul": 3129, + "idge": 3130, + "Ġextra": 3131, + "31": 3132, + "Ġsle": 3133, + "67": 3134, + "Ġwond": 3135, + "Ġreports": 3136, + "just": 3137, + "ĠAustral": 3138, + "Ġcapital": 3139, + "Ġens": 3140, + "Ġcommand": 3141, + "Ġallowed": 3142, + "Ġprep": 3143, + "Ġcapt": 3144, + "hib": 3145, + 
"Ġnumbers": 3146, + "chan": 3147, + "Ġfair": 3148, + "mp": 3149, + "oms": 3150, + "Ġreach": 3151, + "With": 3152, + "tain": 3153, + "Ġbroad": 3154, + "Ġcouple": 3155, + "ecause": 3156, + "lying": 3157, + "ĠFeb": 3158, + "Ġscreen": 3159, + "Ġlives": 3160, + "Ġprior": 3161, + "ĠCongress": 3162, + "Ar": 3163, + "Ġapproach": 3164, + "Ġemer": 3165, + "aries": 3166, + "ĠDis": 3167, + "serv": 3168, + "ĠNe": 3169, + "Ġbuilt": 3170, + "cies": 3171, + "Ġrepe": 3172, + "Ġrules": 3173, + "force": 3174, + "ĠPal": 3175, + "Ġfinancial": 3176, + "Ġconsidered": 3177, + "ĠChar": 3178, + "nces": 3179, + "ĠIS": 3180, + "Ġbrought": 3181, + "Ġbi": 3182, + "iers": 3183, + "ĠSim": 3184, + "OP": 3185, + "Ġproducts": 3186, + "Ġvisit": 3187, + "Ġdocument": 3188, + "Ġconduct": 3189, + "Ġcompletely": 3190, + "ining": 3191, + "ĠCalif": 3192, + "ibly": 3193, + "Ġwritten": 3194, + "ĠTV": 3195, + "ements": 3196, + "Ġdraw": 3197, + "One": 3198, + "Ġpublished": 3199, + "Ġsecret": 3200, + "rain": 3201, + "het": 3202, + "ĠFacebook": 3203, + "onday": 3204, + "ĠUp": 3205, + "Ġsexual": 3206, + "Ġthous": 3207, + "ĠPat": 3208, + "Ġess": 3209, + "Ġstandard": 3210, + "Ġarm": 3211, + "ges": 3212, + "ection": 3213, + "Ġfell": 3214, + "Ġforeign": 3215, + "ani": 3216, + "ĠFriday": 3217, + "Ġregular": 3218, + "inary": 3219, + "Ġincreased": 3220, + "Ġusually": 3221, + "Ġdemon": 3222, + "Ġdark": 3223, + "Ġadditional": 3224, + "rol": 3225, + "ĠOf": 3226, + "Ġproduction": 3227, + "!!": 3228, + "undred": 3229, + "Ġinternational": 3230, + "idents": 3231, + "ĠFree": 3232, + "roup": 3233, + "Ġrace": 3234, + "Ġmach": 3235, + "Ġhuge": 3236, + "All": 3237, + "lear": 3238, + "ovember": 3239, + "Ġtown": 3240, + "Ġattention": 3241, + "ĠOff": 3242, + "yond": 3243, + "ĠThen": 3244, + "field": 3245, + "Ġterror": 3246, + "raz": 3247, + "ĠBo": 3248, + "Ġmeeting": 3249, + "ĠPark": 3250, + "Ġarrest": 3251, + "Ġfear": 3252, + "Ġaw": 3253, + "ĠVal": 3254, + "oring": 3255, + "',": 3256, + "Ġextreme": 3257, + "arr": 3258, + "Ġworkers": 3259, + "After": 3260, + "Ġ31": 3261, + "net": 3262, + "ament": 3263, + "Ġdirectly": 3264, + "Ġpopulation": 3265, + "ube": 3266, + "ĠOctober": 3267, + "ĠIN": 3268, + "ĠJanuary": 3269, + "59": 3270, + "ĠDavid": 3271, + "Ġcross": 3272, + "cember": 3273, + "ĠFirst": 3274, + "Ġmessage": 3275, + "irit": 3276, + "Ġnation": 3277, + "Ġpoll": 3278, + "isions": 3279, + "Ġanswer": 3280, + "ny": 3281, + "isode": 3282, + "Ġcarry": 3283, + "ĠRussia": 3284, + "Ġhear": 3285, + "ength": 3286, + "roy": 3287, + "Ġnatural": 3288, + "inally": 3289, + "Ġdog": 3290, + "mitted": 3291, + "Ġtrade": 3292, + "Ġsubst": 3293, + "Ġmultiple": 3294, + "ĠAfric": 3295, + "Ġfans": 3296, + "Ġsort": 3297, + "Ġglobal": 3298, + "ication": 3299, + "ĠWed": 3300, + "ara": 3301, + "Ġachie": 3302, + "Ġlanguage": 3303, + "vey": 3304, + "Ġtal": 3305, + "Ġnecessary": 3306, + "Ġdetails": 3307, + "Ġsen": 3308, + "ĠSund": 3309, + "ĠReg": 3310, + "ĠRec": 3311, + "06": 3312, + "Ġsil": 3313, + "ressive": 3314, + "Ġmedical": 3315, + "unch": 3316, + "ornia": 3317, + "Ġund": 3318, + "fort": 3319, + "ocks": 3320, + "ĠMonday": 3321, + "uesday": 3322, + "craft": 3323, + "77": 3324, + "urt": 3325, + "Ġver": 3326, + "ĠHill": 3327, + "Ġreceive": 3328, + "Ġmorning": 3329, + "estern": 3330, + "Ġbank": 3331, + "Ġsat": 3332, + "irth": 3333, + "ĠHigh": 3334, + "Ġdevice": 3335, + "ĠTHE": 3336, + "ĠCenter": 3337, + "Ġsafe": 3338, + "Ġple": 3339, + "ĠCanada": 3340, + "Ġsystems": 3341, + "Ġassist": 3342, + "Ġsurv": 3343, + "Ġbattle": 3344, + "ĠSoc": 3345, + "vertis": 3346, + "She": 3347, + "Ġpaper": 
3348, + "Ġgrowth": 3349, + "Ġcast": 3350, + "Sc": 3351, + "Ġplans": 3352, + "lled": 3353, + "Ġparts": 3354, + "Ġwall": 3355, + "Ġmovement": 3356, + "Ġpractice": 3357, + "imately": 3358, + "Ġdisplay": 3359, + "Ġsometimes": 3360, + "omp": 3361, + "ĠPaul": 3362, + "ĠYes": 3363, + "king": 3364, + "58": 3365, + "oly": 3366, + "Ġson": 3367, + "Ġavoid": 3368, + "okes": 3369, + "ĠJew": 3370, + "Ġtowards": 3371, + "asc": 3372, + "Ġ//": 3373, + "ĠKore": 3374, + "Ġtalking": 3375, + "Ġcorrect": 3376, + "Ġspent": 3377, + "icks": 3378, + "iable": 3379, + "eared": 3380, + "Ġterm": 3381, + "Ġwants": 3382, + "oming": 3383, + "Ġut": 3384, + "Ġdoub": 3385, + "Ġforces": 3386, + "Ġplease": 3387, + "69": 3388, + "ĠNovember": 3389, + "atform": 3390, + "ondon": 3391, + "Ġones": 3392, + "Ġimmediately": 3393, + "ĠRussian": 3394, + "ĠMet": 3395, + "Ġdeg": 3396, + "Ġparents": 3397, + "CH": 3398, + "ĠAmericans": 3399, + "aly": 3400, + "ĠMod": 3401, + "Ġshown": 3402, + "Ġconditions": 3403, + "Ġstuff": 3404, + "Ġreb": 3405, + "ĠYour": 3406, + "Ġincludes": 3407, + "nown": 3408, + "ĠSam": 3409, + "Ġexperien": 3410, + "mission": 3411, + "ĠEven": 3412, + "aught": 3413, + "Ġannounced": 3414, + "ĠRepublican": 3415, + "Ġdetermin": 3416, + "Ġdescribed": 3417, + "ĠCounty": 3418, + "()": 3419, + "Ġdoor": 3420, + "Ġchanged": 3421, + "Ġneigh": 3422, + "ĠHere": 3423, + "Ġclean": 3424, + "Ġpan": 3425, + "ĠDecember": 3426, + "ĠEuropean": 3427, + "iring": 3428, + "apter": 3429, + "Ġclub": 3430, + "ĠTuesday": 3431, + "Ġpaid": 3432, + "ĠNet": 3433, + "Ġattacks": 3434, + "Ġcharacters": 3435, + "Ġalone": 3436, + "Ġdirector": 3437, + "dom": 3438, + "Ġ35": 3439, + "Ġload": 3440, + "Ġrout": 3441, + "ĠCalifornia": 3442, + "Ġfinally": 3443, + "Ġrac": 3444, + "Ġcontr": 3445, + "Ġexactly": 3446, + "resh": 3447, + "pri": 3448, + "ĠIslam": 3449, + "Ġnature": 3450, + "Ġcareer": 3451, + "Ġlatest": 3452, + "Ġconvers": 3453, + "ĠSl": 3454, + "pose": 3455, + "cient": 3456, + "ĠInc": 3457, + "ivity": 3458, + "88": 3459, + "ĠAtt": 3460, + "ĠMor": 3461, + "nesday": 3462, + "Ġweight": 3463, + "ken": 3464, + "Ġnote": 3465, + "Ġteams": 3466, + "Ġ\\": 3467, + "airs": 3468, + "ĠGreen": 3469, + "Ġhundred": 3470, + "onent": 3471, + "Ġstreng": 3472, + "Ġconsist": 3473, + "icated": 3474, + "Ġregul": 3475, + "Ġlic": 3476, + "astic": 3477, + "Ġten": 3478, + "ursday": 3479, + "elligence": 3480, + "ously": 3481, + "ĠUK": 3482, + "BI": 3483, + "Ġcosts": 3484, + "Ġindepend": 3485, + "ĠAP": 3486, + "Ġnormal": 3487, + "Ġhom": 3488, + "Ġobvious": 3489, + "Ġswe": 3490, + "Ġstar": 3491, + "Ġready": 3492, + "acher": 3493, + "Ġimplement": 3494, + "gest": 3495, + "Ġsong": 3496, + "ĠGet": 3497, + "ĠLab": 3498, + "Ġinteresting": 3499, + "using": 3500, + "Ġgiving": 3501, + "ĠSunday": 3502, + "Ġetc": 3503, + "Ġmiddle": 3504, + "Ġremember": 3505, + "right": 3506, + "osition": 3507, + "utions": 3508, + "Ġmax": 3509, + "46": 3510, + "Ġyourself": 3511, + "Ġdemand": 3512, + "Ġtreatment": 3513, + "Ġdanger": 3514, + "ĠCons": 3515, + "Ġguy": 3516, + "ĠBritish": 3517, + "Ġphysical": 3518, + "Ġrelated": 3519, + "Ġremain": 3520, + "Ġcouldn": 3521, + "Ġrefer": 3522, + "Ġcitiz": 3523, + "box": 3524, + "ENT": 3525, + "board": 3526, + "Ġinn": 3527, + "IG": 3528, + "ero": 3529, + "ĠStreet": 3530, + "ospital": 3531, + "rench": 3532, + "chers": 3533, + "Ġstra": 3534, + "OL": 3535, + "ager": 3536, + "ĠAN": 3537, + "Ġeasily": 3538, + "IA": 3539, + "enge": 3540, + "iny": 3541, + "Ġclos": 3542, + "ocked": 3543, + "Ġuses": 3544, + "ĠCoun": 3545, + "Im": 3546, + "uild": 3547, + "??": 3548, + "more": 3549, 
+ "Ġang": 3550, + "Ġwrite": 3551, + "olute": 3552, + "57": 3553, + "Ġleader": 3554, + "Ġreading": 3555, + "": 3784, + "Ġfigure": 3785, + "Ġdisapp": 3786, + "enty": 3787, + "Ġsoftware": 3788, + "Ġult": 3789, + "Ġofficers": 3790, + "New": 3791, + "Is": 3792, + "Ġremains": 3793, + "ĠIndia": 3794, + "Ġpsych": 3795, + "rief": 3796, + "Ġcat": 3797, + "esc": 3798, + "Ġobserv": 3799, + "Ġstage": 3800, + "ĠDark": 3801, + "Ġenter": 3802, + "change": 3803, + "Ġpassed": 3804, + "Ġdespite": 3805, + "ĠOut": 3806, + "Ġmovie": 3807, + "rs": 3808, + "Ġvoice": 3809, + "mine": 3810, + "ĠPlay": 3811, + "Ġtoward": 3812, + "ĠTer": 3813, + "Ġregion": 3814, + "Ġvalues": 3815, + "orters": 3816, + "Ġmount": 3817, + "Ġofficer": 3818, + "ĠOther": 3819, + "ban": 3820, + "Ġhous": 3821, + "wood": 3822, + "room": 3823, + "IV": 3824, + "ĠSun": 3825, + "see": 3826, + "ĠOver": 3827, + "rog": 3828, + "90": 3829, + "Ġlay": 3830, + "ĠTur": 3831, + "awn": 3832, + "Ġpressure": 3833, + "ĠSub": 3834, + "Ġbooks": 3835, + "edom": 3836, + "ĠSand": 3837, + "AA": 3838, + "ago": 3839, + "Ġreasons": 3840, + "ford": 3841, + "Ġactivity": 3842, + "UT": 3843, + "Now": 3844, + "ĠSenate": 3845, + "cell": 3846, + "night": 3847, + "Ġcalls": 3848, + "inter": 3849, + "Ġletter": 3850, + "ĠRob": 3851, + "ĠJe": 3852, + "Ġchoose": 3853, + "ĠLaw": 3854, + "Get": 3855, + "Be": 3856, + "Ġrob": 3857, + "Ġtypes": 3858, + "Ġplatform": 3859, + "Ġquarter": 3860, + "RA": 3861, + "ĠTime": 3862, + "Ġmaybe": 3863, + "ĠCr": 3864, + "95": 3865, + "pre": 3866, + "Ġmoving": 3867, + "Ġlif": 3868, + "Ġgold": 3869, + "Ġsom": 3870, + "Ġpatients": 3871, + "Ġtruth": 3872, + "ĠKe": 3873, + "urance": 3874, + "antly": 3875, + "mar": 3876, + "Ġcharge": 3877, + "ĠGreat": 3878, + "Ġcele": 3879, + "--------------------------------": 3880, + "Ġrock": 3881, + "roid": 3882, + "ancy": 3883, + "Ġcredit": 3884, + "aud": 3885, + "By": 3886, + "ĠEvery": 3887, + "Ġmoved": 3888, + "inger": 3889, + "ribution": 3890, + "Ġnames": 3891, + "Ġstraight": 3892, + "ĠHealth": 3893, + "ĠWell": 3894, + "Ġfeature": 3895, + "Ġrule": 3896, + "Ġsche": 3897, + "inated": 3898, + "ĠMichael": 3899, + "berg": 3900, + "41": 3901, + "iled": 3902, + "band": 3903, + "Ġclick": 3904, + "ĠAngel": 3905, + "onents": 3906, + "ÂŃ": 3907, + "ĠIraq": 3908, + "ĠSaturday": 3909, + "Ġaware": 3910, + "part": 3911, + "Ġpattern": 3912, + "OW": 3913, + "ĠLet": 3914, + "Ġgrad": 3915, + "igned": 3916, + "Ġassociated": 3917, + "Ġstyle": 3918, + "no": 3919, + "iation": 3920, + "aith": 3921, + "ilies": 3922, + "Ġstories": 3923, + "uration": 3924, + "Ġindividuals": 3925, + "ĠâĢ¦": 3926, + "miss": 3927, + "ĠAssoci": 3928, + "ishing": 3929, + "aby": 3930, + "Ġsummer": 3931, + "ĠBen": 3932, + "Ġ32": 3933, + "Ġarch": 3934, + "uty": 3935, + "ĠTexas": 3936, + "hol": 3937, + "Ġfully": 3938, + "Ġmill": 3939, + "Ġfollowed": 3940, + "ĠBill": 3941, + "ĠIndian": 3942, + "ĠSecret": 3943, + "ĠBel": 3944, + "ĠFebruary": 3945, + "Ġjobs": 3946, + "Ġseemed": 3947, + "ĠGovern": 3948, + "ipped": 3949, + "Ġreality": 3950, + "Ġlines": 3951, + "Ġpark": 3952, + "Ġmeasure": 3953, + "ĠOur": 3954, + "IM": 3955, + "Ġbrother": 3956, + "Ġgrowing": 3957, + "Ġban": 3958, + "Ġestim": 3959, + "Ġcry": 3960, + "ĠSchool": 3961, + "Ġmechan": 3962, + "ĠOF": 3963, + "ĠWindows": 3964, + "Ġrates": 3965, + "ĠOh": 3966, + "Ġpositive": 3967, + "Ġculture": 3968, + "istics": 3969, + "ica": 3970, + "Ġhar": 3971, + "ya": 3972, + "itely": 3973, + "ipp": 3974, + "Ġmap": 3975, + "encies": 3976, + "ĠWilliam": 3977, + "II": 3978, + "akers": 3979, + "56": 3980, + "ĠMart": 3981, + "ĠRem": 
3982, + "Ġaltern": 3983, + "itude": 3984, + "Ġcoach": 3985, + "rowd": 3986, + "Don": 3987, + "Ġkids": 3988, + "Ġjournal": 3989, + "Ġcorpor": 3990, + "Ġfalse": 3991, + "Ġweb": 3992, + "Ġsleep": 3993, + "Ġcontain": 3994, + "Ġsto": 3995, + "Ġbed": 3996, + "iverse": 3997, + "ĠRich": 3998, + "ĠChinese": 3999, + "Ġpun": 4000, + "Ġmeant": 4001, + "known": 4002, + "Ġnotice": 4003, + "Ġfavorite": 4004, + "aven": 4005, + "Ġcondition": 4006, + "Ġpurpose": 4007, + "))": 4008, + "Ġorganization": 4009, + "Ġchalleng": 4010, + "Ġmanufact": 4011, + "Ġsusp": 4012, + "ĠAc": 4013, + "Ġcritic": 4014, + "unes": 4015, + "uclear": 4016, + "Ġmer": 4017, + "vention": 4018, + "Ġ80": 4019, + "Ġmist": 4020, + "ĠUs": 4021, + "ĠTor": 4022, + "http": 4023, + "olf": 4024, + "Ġlarger": 4025, + "Ġadvant": 4026, + "Ġresear": 4027, + "Ġactions": 4028, + "ml": 4029, + "Ġkept": 4030, + "Ġaim": 4031, + ",'": 4032, + "col": 4033, + "Ġbenefits": 4034, + "ifying": 4035, + "Ġactual": 4036, + "ĠInternational": 4037, + "Ġvehicle": 4038, + "Ġchief": 4039, + "Ġefforts": 4040, + "ĠLeague": 4041, + "ĠMost": 4042, + "Ġwait": 4043, + "Ġadult": 4044, + "Ġoverall": 4045, + "Ġspeech": 4046, + "Ġhighly": 4047, + "Ġfemale": 4048, + "Ġerror": 4049, + "Ġeffective": 4050, + "54": 4051, + "Ġencour": 4052, + "well": 4053, + "Ġfailed": 4054, + "Ġconserv": 4055, + "Ġprograms": 4056, + "Ġtrou": 4057, + "Ġahead": 4058, + "500": 4059, + "vertisement": 4060, + "IP": 4061, + "ĠFound": 4062, + "pir": 4063, + "Ġ%": 4064, + "Ġcrime": 4065, + "ander": 4066, + "Ġlocation": 4067, + "ĠIran": 4068, + "Ġbehavior": 4069, + "azing": 4070, + "Ġrare": 4071, + "Ġemb": 4072, + "Ġcaused": 4073, + "Ġship": 4074, + "Ġactive": 4075, + "Ġcontribut": 4076, + "Ġgreen": 4077, + "Ġacqu": 4078, + "Ġreflect": 4079, + "venue": 4080, + "Ġfirm": 4081, + "Ġbirth": 4082, + "].": 4083, + "Ġclearly": 4084, + "Ġemot": 4085, + "Ġagency": 4086, + "riage": 4087, + "Ġmemory": 4088, + "98": 4089, + "SA": 4090, + "ĠSee": 4091, + "acing": 4092, + "CC": 4093, + "Ġbiggest": 4094, + "Ġrap": 4095, + "Ġbasic": 4096, + "Ġband": 4097, + "eat": 4098, + "Ġsuspect": 4099, + "ĠMac": 4100, + "Ġ90": 4101, + "mark": 4102, + "istan": 4103, + "Ġspread": 4104, + "ams": 4105, + "ki": 4106, + "asy": 4107, + "rav": 4108, + "ĠRober": 4109, + "Ġdemonstr": 4110, + "rated": 4111, + "Ġabsolute": 4112, + "Ġplaces": 4113, + "Ġimpl": 4114, + "ibrary": 4115, + "Ġcards": 4116, + "Ġdestroy": 4117, + "Ġvirt": 4118, + "vere": 4119, + "Ġappeared": 4120, + "yan": 4121, + "point": 4122, + "Ġbeg": 4123, + "Ġtemper": 4124, + "spe": 4125, + "anted": 4126, + "ears": 4127, + "ĠDirect": 4128, + "Ġlength": 4129, + "Ġblog": 4130, + "amb": 4131, + "Ġinteg": 4132, + "Ġresources": 4133, + "acc": 4134, + "iful": 4135, + "Ġspot": 4136, + "Ġforced": 4137, + "Ġthousands": 4138, + "ĠMinister": 4139, + "Ġqual": 4140, + "ĠFrench": 4141, + "atically": 4142, + "Ġgenerally": 4143, + "Ġdrink": 4144, + "Ġthus": 4145, + "IL": 4146, + "odes": 4147, + "Ġappropri": 4148, + "ĠRead": 4149, + "Ġwhom": 4150, + "Ġeye": 4151, + "Ġcollege": 4152, + "Ġ45": 4153, + "irection": 4154, + "Ġensure": 4155, + "Ġapparent": 4156, + "iders": 4157, + "Ġreligious": 4158, + "Ġminor": 4159, + "olic": 4160, + "Ġtro": 4161, + "ĠWhy": 4162, + "ribute": 4163, + "met": 4164, + "Ġprimary": 4165, + "Ġdeveloped": 4166, + "Ġpeace": 4167, + "Ġskin": 4168, + "ste": 4169, + "ava": 4170, + "Ġblue": 4171, + "Ġfamilies": 4172, + "Ġir": 4173, + "Ġapply": 4174, + "Ġinform": 4175, + "ĠSmith": 4176, + "CT": 4177, + "ii": 4178, + "Ġlimit": 4179, + "Ġresist": 4180, + "................": 4181, + "umn": 
4182, + "Ġconflic": 4183, + "Ġtwe": 4184, + "udd": 4185, + "ĠTom": 4186, + "Ġliter": 4187, + "que": 4188, + "bon": 4189, + "Ġhair": 4190, + "Ġeventually": 4191, + "Ġpus": 4192, + "Ġhelped": 4193, + "Ġagg": 4194, + "orney": 4195, + "ĠApple": 4196, + "Ġfit": 4197, + "ĠSur": 4198, + "Ġprem": 4199, + "Ġsales": 4200, + "Ġseconds": 4201, + "Ġstrength": 4202, + "Ġfeeling": 4203, + "¿½": 4204, + "Ġtour": 4205, + "Ġknows": 4206, + "oom": 4207, + "Ġexerc": 4208, + "Ġsomew": 4209, + "�": 4210, + ">>": 4211, + "Ġspokes": 4212, + "Ġideas": 4213, + "Ġregist": 4214, + "soft": 4215, + "ĠDel": 4216, + "ĠPC": 4217, + "Ġpropos": 4218, + "Ġlaunch": 4219, + "Ġbottom": 4220, + "TH": 4221, + "ĠPlease": 4222, + "vest": 4223, + "itz": 4224, + "ĠInter": 4225, + "Ġscript": 4226, + "Ġrat": 4227, + "arning": 4228, + "Ġil": 4229, + "ĠJer": 4230, + "ĠAre": 4231, + "Ġwhatever": 4232, + "oken": 4233, + "cience": 4234, + "Ġmode": 4235, + "Ġagree": 4236, + "Ġsources": 4237, + "Ġinitial": 4238, + "Ġrestrict": 4239, + "Ġwonder": 4240, + "usion": 4241, + "####": 4242, + "ĠSil": 4243, + "ville": 4244, + "Ġburn": 4245, + "tw": 4246, + "asion": 4247, + "Ġ£": 4248, + "Ġnor": 4249, + "uing": 4250, + "Ġreached": 4251, + "Ġsun": 4252, + "Ġcateg": 4253, + "igration": 4254, + "Ġcook": 4255, + "Ġpromot": 4256, + "Ġmale": 4257, + "Ġclimate": 4258, + "Ġfix": 4259, + "Ġalleged": 4260, + "UR": 4261, + "alled": 4262, + "Ġimages": 4263, + "Cont": 4264, + "ota": 4265, + "Ġschools": 4266, + "ios": 4267, + "Ġdrop": 4268, + "Ġstream": 4269, + "ĠMo": 4270, + "Ġpreviously": 4271, + "aling": 4272, + "Ġpet": 4273, + "Ġdouble": 4274, + "Ġ(@": 4275, + "annel": 4276, + "Ġdefault": 4277, + "ties": 4278, + "Ġrank": 4279, + "ĠDec": 4280, + "ĠCouncil": 4281, + "Ġweapon": 4282, + "Ġstock": 4283, + "Ġanaly": 4284, + "ĠStr": 4285, + "Ġpicture": 4286, + "ĠPolice": 4287, + "ference": 4288, + "Ġcentury": 4289, + "Ġcitizens": 4290, + "Ġonto": 4291, + "Ġexpand": 4292, + "Ġhero": 4293, + "ĠSol": 4294, + "Ġwild": 4295, + "Ġupdate": 4296, + "Ġcustomers": 4297, + "ront": 4298, + "def": 4299, + "Ġlik": 4300, + "Ġcriminal": 4301, + "ĠChristian": 4302, + "SP": 4303, + "76": 4304, + "Ġleaving": 4305, + "Ġotherwise": 4306, + "ĠDist": 4307, + "Ġbasis": 4308, + "52": 4309, + "53": 4310, + "icip": 4311, + "ĠBer": 4312, + "Ġrecommend": 4313, + "Ġfloor": 4314, + "Ġcrowd": 4315, + "oles": 4316, + "Ġ70": 4317, + "Ġcentral": 4318, + "ĠEv": 4319, + "Ġdream": 4320, + "Ġdownload": 4321, + "Ġconfir": 4322, + "ĠThom": 4323, + "Ġwindow": 4324, + "Ġhappens": 4325, + "Ġunit": 4326, + "Ġtend": 4327, + "Ġspl": 4328, + "Ġbecomes": 4329, + "Ġfighting": 4330, + "Ġpredict": 4331, + "ĠPress": 4332, + "ĠPower": 4333, + "Ġheavy": 4334, + "aked": 4335, + "Ġfan": 4336, + "orter": 4337, + "ategy": 4338, + "BA": 4339, + "izes": 4340, + "Ġspend": 4341, + "Here": 4342, + "Ġ2007": 4343, + "Ġadop": 4344, + "ĠHam": 4345, + "Ġfootball": 4346, + "ĠPort": 4347, + "oday": 4348, + "51": 4349, + "ampions": 4350, + "Ġtransfer": 4351, + "ht": 4352, + "Ġ38": 4353, + "term": 4354, + "acity": 4355, + "Ġbur": 4356, + "],": 4357, + "ternal": 4358, + "rig": 4359, + "but": 4360, + "Ġtherefore": 4361, + "ĠBecause": 4362, + "resp": 4363, + "rey": 4364, + "Ġmission": 4365, + "Some": 4366, + "Ġnoted": 4367, + "Ġassum": 4368, + "Ġdisease": 4369, + "Ġedit": 4370, + "Ġprogress": 4371, + "rd": 4372, + "ĠBrown": 4373, + "ocal": 4374, + "Ġadding": 4375, + "Ġraised": 4376, + "ĠAny": 4377, + "Ġtick": 4378, + "Ġseeing": 4379, + "ĠPeople": 4380, + "Ġagreement": 4381, + "Ġserver": 4382, + "Ġwat": 4383, + "Ġdebate": 4384, + "Ġsupposed": 
4385, + "iling": 4386, + "Ġlargest": 4387, + "Ġsuccessful": 4388, + "ĠPri": 4389, + "ĠDemocratic": 4390, + "Ġjump": 4391, + "ĠSyria": 4392, + "Ġowners": 4393, + "Ġoffers": 4394, + "Ġshooting": 4395, + "Ġeffic": 4396, + "sey": 4397, + "Ġhaven": 4398, + "verse": 4399, + "tered": 4400, + "ĠLight": 4401, + "imal": 4402, + "ĠBig": 4403, + "Ġdefend": 4404, + "Ġbeat": 4405, + "Ġrecords": 4406, + "%)": 4407, + "Ġscen": 4408, + "Ġemployees": 4409, + "Ġdevices": 4410, + "hem": 4411, + "Ġcommer": 4412, + "ĠMex": 4413, + "Ġbenefit": 4414, + "ĠProf": 4415, + "Ġilleg": 4416, + "Ġsurface": 4417, + "ĠAlso": 4418, + "Ġharm": 4419, + "ingly": 4420, + "wide": 4421, + "ĠAlex": 4422, + "Ġshut": 4423, + "ĠCur": 4424, + "Ġlose": 4425, + "pm": 4426, + "Ġchallenge": 4427, + "semb": 4428, + "Ġstation": 4429, + "Ġintelligence": 4430, + "Ġaccur": 4431, + "ĠFlor": 4432, + "Ġrequires": 4433, + "ĠMal": 4434, + "bum": 4435, + "Ġhospital": 4436, + "Ġspirit": 4437, + "Ġoffered": 4438, + "Ġproduce": 4439, + "ĠCommun": 4440, + "Ġcreating": 4441, + "Ġcris": 4442, + "spect": 4443, + "Ġended": 4444, + "Ġdaily": 4445, + "Ġvoters": 4446, + "lands": 4447, + "ias": 4448, + "ih": 4449, + "ona": 4450, + "Ġsmart": 4451, + "ĠOffice": 4452, + "ĠLord": 4453, + "rial": 4454, + "ĠInternet": 4455, + "Ġcircum": 4456, + "Ġextremely": 4457, + "'.": 4458, + "Ġopinion": 4459, + "ĠMil": 4460, + "Ġgain": 4461, + "BS": 4462, + "ĠFin": 4463, + "yp": 4464, + "Ġuseful": 4465, + "Ġbudget": 4466, + "Ġcomfort": 4467, + "isf": 4468, + "Ġbackground": 4469, + "eline": 4470, + "Ġepisode": 4471, + "Ġenemy": 4472, + "Ġtrial": 4473, + "Ġestablish": 4474, + "date": 4475, + "ĠCap": 4476, + "Ġcontinues": 4477, + "Ġshowing": 4478, + "ĠUnion": 4479, + "with": 4480, + "Ġposted": 4481, + "ĠSystem": 4482, + "Ġeat": 4483, + "rian": 4484, + "Ġrise": 4485, + "ĠGermany": 4486, + "ils": 4487, + "Ġsigned": 4488, + "Ġvill": 4489, + "Ġgrand": 4490, + "mor": 4491, + "ĠEngland": 4492, + "Ġprojects": 4493, + "umber": 4494, + "Ġconference": 4495, + "za": 4496, + "Ġresponsible": 4497, + "ĠArab": 4498, + "Ġlearned": 4499, + "âĢĶâĢĶ": 4500, + "ipping": 4501, + "ĠGeorge": 4502, + "OC": 4503, + "Ġreturned": 4504, + "ĠAustralia": 4505, + "Ġbrief": 4506, + "Qu": 4507, + "Ġbrand": 4508, + "illing": 4509, + "abled": 4510, + "Ġhighest": 4511, + "Ġtrain": 4512, + "ĠCommission": 4513, + "while": 4514, + "Ġnom": 4515, + "ception": 4516, + "Ġmut": 4517, + "ĠBlue": 4518, + "Ġincident": 4519, + "vant": 4520, + "86": 4521, + "ĠID": 4522, + "Ġnuclear": 4523, + "74": 4524, + "ĠLike": 4525, + "ĠRE": 4526, + "ĠMicro": 4527, + "li": 4528, + "mail": 4529, + "Ġcharges": 4530, + "89": 4531, + "Ġadjust": 4532, + "ado": 4533, + "Ġearth": 4534, + "NA": 4535, + "Ġprices": 4536, + "PA": 4537, + "Ġdraft": 4538, + "Ġruns": 4539, + "Ġcandidate": 4540, + "enses": 4541, + "Ġmanagement": 4542, + "ĠPhil": 4543, + "ĠMiss": 4544, + "Ġteach": 4545, + "gram": 4546, + "Ġunderstanding": 4547, + "ait": 4548, + "icago": 4549, + "Add": 4550, + "ĠEp": 4551, + "secut": 4552, + "Ġseparate": 4553, + "Ġinstance": 4554, + "Ġeth": 4555, + "Ġunless": 4556, + "********": 4557, + "ĠFore": 4558, + "inate": 4559, + "Ġoperations": 4560, + "Sp": 4561, + "Ġfaith": 4562, + "gar": 4563, + "ĠChurch": 4564, + "ronic": 4565, + "Ġconfig": 4566, + "osure": 4567, + "Ġactivities": 4568, + "Ġtraditional": 4569, + "Ġ36": 4570, + "Ġdirection": 4571, + "Ġmachine": 4572, + "Ġsurround": 4573, + "Ġpush": 4574, + "unction": 4575, + "ĠEU": 4576, + "Ġeasier": 4577, + "Ġargument": 4578, + "GB": 4579, + "Ġmicro": 4580, + "Ġspending": 4581, + "izations": 4582, + 
"Ġtheory": 4583, + "adow": 4584, + "Ġcalling": 4585, + "ĠLast": 4586, + "Ġder": 4587, + "Ġinfluence": 4588, + "Ġcommit": 4589, + "Ġphoto": 4590, + "Ġunc": 4591, + "istry": 4592, + "gn": 4593, + "aste": 4594, + "acks": 4595, + "Ġdisp": 4596, + "ady": 4597, + "do": 4598, + "ĠGood": 4599, + "Ġ`": 4600, + "Ġwish": 4601, + "Ġrevealed": 4602, + "³³": 4603, + "lig": 4604, + "Ġenforce": 4605, + "ĠCommittee": 4606, + "Ġchem": 4607, + "Ġmiles": 4608, + "Ġinterested": 4609, + "Ġsolution": 4610, + "icy": 4611, + "inct": 4612, + "Ġ->": 4613, + "ĠDet": 4614, + "Ġremoved": 4615, + "Ġcompar": 4616, + "eah": 4617, + "Ġplant": 4618, + "ĠSince": 4619, + "Ġachieve": 4620, + "Ġadvantage": 4621, + "Ġslightly": 4622, + "bing": 4623, + "Ġplaced": 4624, + "under": 4625, + "2015": 4626, + "ĠMad": 4627, + "Ġtim": 4628, + "oses": 4629, + "Ġcru": 4630, + "ĠRock": 4631, + "Ġmostly": 4632, + "Ġnegative": 4633, + "Ġsetting": 4634, + "Ġproduced": 4635, + "Ġmur": 4636, + "Ġconnection": 4637, + "ĠMer": 4638, + "Ġdriver": 4639, + "Ġexecutive": 4640, + "Ġassault": 4641, + "Ġborn": 4642, + "ĠVer": 4643, + "tained": 4644, + "Ġstructure": 4645, + "Ġreduce": 4646, + "Ġdecades": 4647, + "Ġded": 4648, + "uke": 4649, + "ĠMany": 4650, + "idden": 4651, + "Ġleague": 4652, + "Se": 4653, + "Ġjoin": 4654, + "Ġdisco": 4655, + "Ġdie": 4656, + "cks": 4657, + "actions": 4658, + "Ġassess": 4659, + "agn": 4660, + "Ġgoals": 4661, + "ours": 4662, + "IR": 4663, + "Ġsenior": 4664, + "iller": 4665, + "mod": 4666, + "ipment": 4667, + "ocol": 4668, + "uy": 4669, + "ĠQue": 4670, + "Ġparties": 4671, + "irgin": 4672, + "Ġlearning": 4673, + "itable": 4674, + "Ġstreet": 4675, + "Ġcamera": 4676, + "App": 4677, + "Ġskills": 4678, + "bre": 4679, + "cious": 4680, + "Ġcelebr": 4681, + "ĠFranc": 4682, + "Ġexisting": 4683, + "Ġwilling": 4684, + "lor": 4685, + "Ġid": 4686, + "ĠSpace": 4687, + "Ġcritical": 4688, + "ĠLa": 4689, + "ortunately": 4690, + "Ġserve": 4691, + "Ġcold": 4692, + "Ġspecies": 4693, + "TS": 4694, + "Ġanimals": 4695, + "ĠBay": 4696, + "Ġolder": 4697, + "ĠUnder": 4698, + "estic": 4699, + "ĠTre": 4700, + "Ġteacher": 4701, + "Ġprefer": 4702, + "vis": 4703, + "Ġthread": 4704, + "ĠMatt": 4705, + "Ġmanager": 4706, + "ãĥ»": 4707, + "Ġprofessional": 4708, + "ĠVol": 4709, + "Ġnotes": 4710, + "These": 4711, + "ula": 4712, + "Ġfresh": 4713, + "ented": 4714, + "uzz": 4715, + "edy": 4716, + "clusion": 4717, + "ĠRel": 4718, + "Ġdoubt": 4719, + "EO": 4720, + "Ġopened": 4721, + "ĠBit": 4722, + "Advertisement": 4723, + "Ġguess": 4724, + "ĠUN": 4725, + "Ġsequ": 4726, + "Ġexplain": 4727, + "otten": 4728, + "Ġattract": 4729, + "aks": 4730, + "Ġstring": 4731, + "Ġcontext": 4732, + "ossible": 4733, + "ĠRepublicans": 4734, + "Ġsolid": 4735, + "Ġcities": 4736, + "Ġasking": 4737, + "Ġrandom": 4738, + "ups": 4739, + "uries": 4740, + "arant": 4741, + "dden": 4742, + "gl": 4743, + "ĠFlorida": 4744, + "Ġdepend": 4745, + "ĠScott": 4746, + "Ġ33": 4747, + "ĠiT": 4748, + "icon": 4749, + "Ġmentioned": 4750, + "Ġ2000": 4751, + "Ġclaimed": 4752, + "Ġdefinitely": 4753, + "ulf": 4754, + "Ġcore": 4755, + "Ġopening": 4756, + "ĠConst": 4757, + "which": 4758, + "ĠTra": 4759, + "AG": 4760, + "72": 4761, + "Ġbelieved": 4762, + "ada": 4763, + "Ġ48": 4764, + "ĠSecurity": 4765, + "yright": 4766, + "ĠPet": 4767, + "ĠLou": 4768, + "Ġholding": 4769, + "================": 4770, + "Ġice": 4771, + "Ġbrow": 4772, + "Ġauthorities": 4773, + "host": 4774, + "word": 4775, + "Ġscore": 4776, + "ĠDiv": 4777, + "Ġcells": 4778, + "Ġtransl": 4779, + "Ġneighbor": 4780, + "Ġremove": 4781, + "uct": 4782, + 
"Ġdistrict": 4783, + "ĠAccording": 4784, + "Ġworse": 4785, + "Ġconcerns": 4786, + "Ġpresidential": 4787, + "Ġpolicies": 4788, + "ĠHall": 4789, + "73": 4790, + "Ġhus": 4791, + "AY": 4792, + "Ġ2006": 4793, + "ĠJud": 4794, + "Ġindependent": 4795, + "ĠJustice": 4796, + "iliar": 4797, + "print": 4798, + "ighter": 4799, + "Ġprotection": 4800, + "zen": 4801, + "Ġsudden": 4802, + "house": 4803, + "ĠJes": 4804, + "PR": 4805, + "ĠInf": 4806, + "Ġbul": 4807, + "Ġ_": 4808, + "ĠService": 4809, + "ĠPR": 4810, + "Ġstrategy": 4811, + "ffect": 4812, + "Ġgirls": 4813, + "Ġmissing": 4814, + "oyal": 4815, + "ĠTeam": 4816, + "ulated": 4817, + "Ġdat": 4818, + "Ġpolitics": 4819, + "abor": 4820, + "According": 4821, + "Ġspell": 4822, + "Ġgraph": 4823, + "orthern": 4824, + "TC": 4825, + "Ab": 4826, + "Ġlabor": 4827, + "isher": 4828, + "Ġkick": 4829, + "ĠiTunes": 4830, + "Ġsteps": 4831, + "poses": 4832, + "Ġsmaller": 4833, + "En": 4834, + "bert": 4835, + "Ġroll": 4836, + "Ġresearchers": 4837, + "Ġclosed": 4838, + "Ġtransport": 4839, + "Ġlawy": 4840, + "________________": 4841, + "ĠChicago": 4842, + "Ġaspect": 4843, + "Ġnone": 4844, + "Ġmarriage": 4845, + "96": 4846, + "Ġelements": 4847, + "ĠFre": 4848, + "ĠSal": 4849, + "Ġdram": 4850, + "FC": 4851, + "top": 4852, + "equ": 4853, + "Ġhearing": 4854, + "Ġsupported": 4855, + "Ġtesting": 4856, + "cohol": 4857, + "Ġmassive": 4858, + "Ġstick": 4859, + "Ġguard": 4860, + "isco": 4861, + "phone": 4862, + "From": 4863, + "However": 4864, + "Ġborder": 4865, + "Ġcopy": 4866, + "ography": 4867, + "list": 4868, + "71": 4869, + "Ġowner": 4870, + "class": 4871, + "ruit": 4872, + "rate": 4873, + "ĠOnce": 4874, + "Ġdigital": 4875, + "Ġtask": 4876, + "ERS": 4877, + "Ġincred": 4878, + "tes": 4879, + "++": 4880, + "ĠFrance": 4881, + "Ġbreat": 4882, + "owl": 4883, + "Ġissued": 4884, + "ĠWestern": 4885, + "Ġdetect": 4886, + "Ġpartners": 4887, + "Ġshared": 4888, + "ĠCall": 4889, + "Ġcancer": 4890, + "ache": 4891, + "ribe": 4892, + "Ġexplained": 4893, + "Ġheat": 4894, + "{\"": 4895, + "Ġinvestment": 4896, + "ĠBook": 4897, + "Ġwood": 4898, + "Ġtools": 4899, + "ĠAlthough": 4900, + "Ġbelief": 4901, + "Ġcrisis": 4902, + "Ġge": 4903, + "ĠMP": 4904, + "Ġoperation": 4905, + "type": 4906, + "~~": 4907, + "ga": 4908, + "Ġcontains": 4909, + "anta": 4910, + "Ġexpress": 4911, + "ĠGroup": 4912, + "ĠJournal": 4913, + "ka": 4914, + "Ġamb": 4915, + "ĠUSA": 4916, + "Ġfinding": 4917, + "Ġfunding": 4918, + "how": 4919, + "Ġestablished": 4920, + "ideos": 4921, + "Ġdegree": 4922, + "Ġdangerous": 4923, + "anging": 4924, + "Ġfreedom": 4925, + "pport": 4926, + "outhern": 4927, + "Ġchurch": 4928, + "Ġcatch": 4929, + "ĠTwo": 4930, + "Ġpresence": 4931, + "ĠGuard": 4932, + "Up": 4933, + "Ġauthority": 4934, + "ĠProject": 4935, + "Ġbutton": 4936, + "Ġconsequ": 4937, + "Ġvalid": 4938, + "Ġweak": 4939, + "Ġstarts": 4940, + "Ġreference": 4941, + "ĠMem": 4942, + "\")": 4943, + "UN": 4944, + "orage": 4945, + "ĠOpen": 4946, + "Ġcollection": 4947, + "ym": 4948, + "gency": 4949, + "Ġbeautiful": 4950, + "ros": 4951, + "Ġtells": 4952, + "Ġwaiting": 4953, + "nel": 4954, + "Ġproviding": 4955, + "ĠDemocrats": 4956, + "Ġdaughter": 4957, + "Ġmaster": 4958, + "Ġpurposes": 4959, + "ĠJapanese": 4960, + "Ġequal": 4961, + "Ġturns": 4962, + "Ġdocuments": 4963, + "Ġwatching": 4964, + "Res": 4965, + "Ġran": 4966, + "2014": 4967, + "Ġreject": 4968, + "ĠKorea": 4969, + "Ġvictims": 4970, + "Level": 4971, + "erences": 4972, + "Ġwitness": 4973, + "Ġ34": 4974, + "Ġreform": 4975, + "coming": 4976, + "Ġoccup": 4977, + "Ġcaught": 4978, + "Ġtraffic": 
4979, + "ading": 4980, + "Ġmodels": 4981, + "ario": 4982, + "Ġserved": 4983, + "Ġbatter": 4984, + "uate": 4985, + "ĠSecretary": 4986, + "Ġagreed": 4987, + "Ġtruly": 4988, + "ynam": 4989, + "ĠRet": 4990, + "Ġunits": 4991, + "ĠResearch": 4992, + "hand": 4993, + "azine": 4994, + "ĠMike": 4995, + "Ġvariety": 4996, + "otal": 4997, + "Ġamazing": 4998, + "Ġconfirmed": 4999, + "Ġentirely": 5000, + "Ġpurchase": 5001, + "Ġelement": 5002, + "Ġcash": 5003, + "Ġdetermine": 5004, + "De": 5005, + "Ġcars": 5006, + "ĠWall": 5007, + "âĸ": 5008, + "Ġviews": 5009, + "Ġdrugs": 5010, + "Ġdepartment": 5011, + "ĠStep": 5012, + "uit": 5013, + "Ġ39": 5014, + "asure": 5015, + "ĠClass": 5016, + "Ġcovered": 5017, + "ĠBank": 5018, + "Ġmere": 5019, + "uana": 5020, + "Ġmulti": 5021, + "Ġmix": 5022, + "Ġunlike": 5023, + "levision": 5024, + "Ġstopped": 5025, + "Ġsem": 5026, + "ĠGal": 5027, + "ules": 5028, + "Ġwel": 5029, + "ĠJohnson": 5030, + "la": 5031, + "Ġskill": 5032, + "Ġbecoming": 5033, + "rie": 5034, + "Ġappropriate": 5035, + "fe": 5036, + "ellow": 5037, + "ĠProt": 5038, + "ulate": 5039, + "ocation": 5040, + "Ġweekend": 5041, + "odies": 5042, + "Ġsites": 5043, + "Ġanimal": 5044, + "ĠTim": 5045, + "Ġscale": 5046, + "Ġcharged": 5047, + "Ġinstruct": 5048, + "illa": 5049, + "Ġmethods": 5050, + "Ġcert": 5051, + "Ġjudge": 5052, + "ĠHel": 5053, + "Ġdollars": 5054, + "Ġstanding": 5055, + "ĠSqu": 5056, + "Ġdebt": 5057, + "liam": 5058, + "Ġdriving": 5059, + "ĠSum": 5060, + "ĠEdition": 5061, + "Ġalbum": 5062, + "andon": 5063, + "IF": 5064, + "ĠUk": 5065, + "63": 5066, + "ader": 5067, + "Ġcommercial": 5068, + "esh": 5069, + "ĠGovernment": 5070, + "Ġdiscovered": 5071, + "Ġoutput": 5072, + "ĠHillary": 5073, + "ĠCarol": 5074, + "Ġ2005": 5075, + "Ġabuse": 5076, + "ancing": 5077, + "Ġswitch": 5078, + "Ġannual": 5079, + "Tw": 5080, + "Ġstated": 5081, + "agement": 5082, + "inner": 5083, + "Ġdemocr": 5084, + "Ġresidents": 5085, + "Ġallowing": 5086, + "Ġfactors": 5087, + "odd": 5088, + "Ġfuck": 5089, + "emies": 5090, + "Ġoccurred": 5091, + "oti": 5092, + "Ġnorth": 5093, + "ĠPublic": 5094, + "Ġinjury": 5095, + "Ġinsurance": 5096, + "CL": 5097, + "olly": 5098, + "ãĢ": 5099, + "Ġrepeated": 5100, + "Ġarms": 5101, + "anged": 5102, + "Ġconstruction": 5103, + "Ġfle": 5104, + "PU": 5105, + "icians": 5106, + "Ġforms": 5107, + "ĠMcC": 5108, + "antic": 5109, + "Ġmental": 5110, + "pire": 5111, + "Ġequipment": 5112, + "Ġfant": 5113, + "Ġdiscussion": 5114, + "Ġregarding": 5115, + "kin": 5116, + "arp": 5117, + "Ġchair": 5118, + "ogue": 5119, + "Ġproceed": 5120, + "ĠId": 5121, + "Our": 5122, + "Ġmurder": 5123, + "Man": 5124, + "Ġ49": 5125, + "asp": 5126, + "Ġsupply": 5127, + "Ġinput": 5128, + "Ġwealth": 5129, + "liament": 5130, + "Ġproced": 5131, + "orial": 5132, + "ĠStat": 5133, + "ĠNFL": 5134, + "hens": 5135, + "ĠInstitute": 5136, + "Ġputting": 5137, + "ournament": 5138, + "etic": 5139, + "Ġlocated": 5140, + "Ġkid": 5141, + "eria": 5142, + "run": 5143, + "Ġprinc": 5144, + "Ġ!": 5145, + "going": 5146, + "ĠBet": 5147, + "Ġclot": 5148, + "Ġtelling": 5149, + "Ġproposed": 5150, + "iot": 5151, + "orry": 5152, + "Ġfunds": 5153, + "gment": 5154, + "ĠLife": 5155, + "Ġbaby": 5156, + "ĠBack": 5157, + "Ġspoke": 5158, + "Image": 5159, + "Ġearn": 5160, + "ĠAT": 5161, + "gu": 5162, + "Ġexchange": 5163, + "ĠLin": 5164, + "oving": 5165, + "Ġpair": 5166, + "More": 5167, + "azon": 5168, + "Ġarrested": 5169, + "Ġkilling": 5170, + "can": 5171, + "ĠCard": 5172, + "yd": 5173, + "Ġidentified": 5174, + "Ġmobile": 5175, + "Ġthanks": 5176, + "onym": 5177, + "ĠForm": 5178, + 
"Ġhundreds": 5179, + "ĠChris": 5180, + "ĠCat": 5181, + "Ġtrend": 5182, + "hat": 5183, + "ĠAv": 5184, + "oman": 5185, + "Ġelectric": 5186, + "ĠWil": 5187, + "SE": 5188, + "Of": 5189, + "Ġrestaur": 5190, + "oted": 5191, + "Ġtrig": 5192, + "Ġnine": 5193, + "Ġbomb": 5194, + "Why": 5195, + "¯": 5196, + "Ġcoverage": 5197, + "Ġappeal": 5198, + "ĠRobert": 5199, + "ĠSup": 5200, + "Ġfinished": 5201, + "Ġflow": 5202, + "Ġdeliver": 5203, + "Ġcalcul": 5204, + "Ġphotos": 5205, + "Ġphil": 5206, + "Ġpieces": 5207, + "Ġappre": 5208, + "kes": 5209, + "Ġrough": 5210, + "Do": 5211, + "Ġpartner": 5212, + "Ġconcerned": 5213, + "Ġ37": 5214, + "ĠGen": 5215, + "Col": 5216, + "ctors": 5217, + "Ġ=>": 5218, + "state": 5219, + "Ġsuggested": 5220, + "ĠForce": 5221, + "CE": 5222, + "Ġherself": 5223, + "ĠPlan": 5224, + "works": 5225, + "ooth": 5226, + "rency": 5227, + "Ġcorner": 5228, + "Ġhusband": 5229, + "Ġinternet": 5230, + "ĠAut": 5231, + "ems": 5232, + "osen": 5233, + "ĠAtl": 5234, + "gen": 5235, + "Ġbalance": 5236, + "62": 5237, + "Ġsounds": 5238, + "text": 5239, + "Ġarr": 5240, + "oves": 5241, + "Ġmillions": 5242, + "Ġradio": 5243, + "Ġsatisf": 5244, + "ĠDam": 5245, + "Mr": 5246, + "Go": 5247, + "Spe": 5248, + "Ġcombat": 5249, + "rant": 5250, + "ĠGree": 5251, + "Ġfuel": 5252, + "Ġdistance": 5253, + "Ġtests": 5254, + "Ġdecre": 5255, + "ĠEr": 5256, + "Ġmanaged": 5257, + "DS": 5258, + "Ġtit": 5259, + "Ġmeasures": 5260, + "ĠLiber": 5261, + "Ġattend": 5262, + "ashed": 5263, + "ĠJose": 5264, + "ĠNight": 5265, + "dit": 5266, + "ĠNov": 5267, + "ĠEnd": 5268, + "outs": 5269, + "Ġgeneration": 5270, + "Ġadvoc": 5271, + "yth": 5272, + "Ġconversation": 5273, + "ĠSky": 5274, + "active": 5275, + "cel": 5276, + "rier": 5277, + "ĠFrank": 5278, + "Ġgender": 5279, + "Ġconcent": 5280, + "Ġcarried": 5281, + "anda": 5282, + "ĠVirgin": 5283, + "Ġarrived": 5284, + "icide": 5285, + "aded": 5286, + "Ġfailure": 5287, + "Ġminimum": 5288, + "lets": 5289, + "Ġworst": 5290, + "Ġkeeping": 5291, + "Ġintended": 5292, + "Ġillegal": 5293, + "Ġsubsc": 5294, + "Ġdetermined": 5295, + "Ġtrip": 5296, + "Yes": 5297, + "Ġraise": 5298, + "Ġ~": 5299, + "Ġfeels": 5300, + "Ġpackage": 5301, + "ĠJo": 5302, + "hi": 5303, + "2016": 5304, + "real": 5305, + "Ġfra": 5306, + "Ġsymb": 5307, + "Me": 5308, + "ucky": 5309, + "pret": 5310, + "ĠKh": 5311, + "ĠEdit": 5312, + "ĠWeb": 5313, + "emic": 5314, + "ĠColor": 5315, + "Ġjustice": 5316, + "Int": 5317, + "Ġfarm": 5318, + "cknow": 5319, + "\">": 5320, + "eless": 5321, + "Ġreduced": 5322, + "Ġ500": 5323, + "xx": 5324, + "ĠRad": 5325, + "ĠWood": 5326, + "Ġclin": 5327, + "Ġhyp": 5328, + "iler": 5329, + "ura": 5330, + "kins": 5331, + "85": 5332, + "61": 5333, + "ĠTheir": 5334, + "ĠMary": 5335, + "Ġsan": 5336, + "Ġnovel": 5337, + "ĠWho": 5338, + "Ġcapacity": 5339, + "Ġimpossible": 5340, + "Ġplays": 5341, + "Ġminister": 5342, + "ijuana": 5343, + "icate": 5344, + "ĠSet": 5345, + "Ġfram": 5346, + "Ġing": 5347, + "Ġcommunities": 5348, + "ĠFBI": 5349, + "ita": 5350, + "Ġbon": 5351, + "Ġstrateg": 5352, + "Ġinterests": 5353, + "lock": 5354, + "gers": 5355, + "mas": 5356, + "ĠAND": 5357, + "Ġconflict": 5358, + "Ġrequirements": 5359, + "Ġsac": 5360, + "Ġoperating": 5361, + "ini": 5362, + "related": 5363, + "Ġcommitted": 5364, + "Ġrelatively": 5365, + "Ġsouth": 5366, + "¯¯": 5367, + "Ġafford": 5368, + "Ġidentity": 5369, + "Ġdecisions": 5370, + "Ġaccused": 5371, + "place": 5372, + "Ġvictory": 5373, + "och": 5374, + "iat": 5375, + "Name": 5376, + "Com": 5377, + "tion": 5378, + "eds": 5379, + "Ġseek": 5380, + "Ġtight": 5381, + "ĠImages": 
5382, + "Ġiniti": 5383, + "Ġhumans": 5384, + "Ġfamiliar": 5385, + "Ġaudience": 5386, + "Ġinternal": 5387, + "venture": 5388, + "Ġsides": 5389, + "ĠTO": 5390, + "Ġdim": 5391, + "Ġconclud": 5392, + "Ġappoint": 5393, + "Ġenforcement": 5394, + "ĠJim": 5395, + "ĠAssociation": 5396, + "Ġcircumst": 5397, + "ĠCanadian": 5398, + "Ġjoined": 5399, + "Ġdifferences": 5400, + "ĠLos": 5401, + "Ġprotest": 5402, + "Ġtwice": 5403, + "win": 5404, + "Ġglass": 5405, + "arsh": 5406, + "ĠArmy": 5407, + "Ġexpression": 5408, + "Ġdecide": 5409, + "Ġplanning": 5410, + "ania": 5411, + "Ġhandle": 5412, + "ĠMicrosoft": 5413, + "ĠNor": 5414, + "Ġmaximum": 5415, + "ĠRev": 5416, + "Ġsea": 5417, + "Ġeval": 5418, + "Ġhelps": 5419, + "ref": 5420, + "Ġbound": 5421, + "Ġmouth": 5422, + "Ġstandards": 5423, + "Ġclim": 5424, + "ĠCamp": 5425, + "ĠFox": 5426, + "cles": 5427, + "Ġarmy": 5428, + "ĠTechn": 5429, + "acking": 5430, + "xy": 5431, + "SS": 5432, + "Ġ42": 5433, + "Ġbug": 5434, + "ĠUkrain": 5435, + "ĠMax": 5436, + "ĠJones": 5437, + "ĠShow": 5438, + "lo": 5439, + "Ġplanet": 5440, + "Ġ75": 5441, + "Ġwinning": 5442, + "Ġfaster": 5443, + "Ġspect": 5444, + "Ġbroken": 5445, + "TR": 5446, + "Ġdefined": 5447, + "Ġhealthy": 5448, + "Ġcompetition": 5449, + "https": 5450, + "ĠIsland": 5451, + "ĠFe": 5452, + "Ġannounce": 5453, + "ĠCup": 5454, + "ĠInstead": 5455, + "Ġclient": 5456, + "Ġpossibly": 5457, + "section": 5458, + "ocket": 5459, + "look": 5460, + "Ġfinish": 5461, + "Ġcrew": 5462, + "Ġreserv": 5463, + "Ġeditor": 5464, + "Ġhate": 5465, + "Ġsale": 5466, + "Ġcontrovers": 5467, + "Ġpages": 5468, + "wing": 5469, + "Ġnumer": 5470, + "Ġopposition": 5471, + "Ġ2004": 5472, + "Ġrefuge": 5473, + "Ġflight": 5474, + "Ġapart": 5475, + "ĠLat": 5476, + "Americ": 5477, + "ĠAfrica": 5478, + "Ġapplications": 5479, + "ĠPalest": 5480, + "ĠBur": 5481, + "Ġgar": 5482, + "ĠSocial": 5483, + "Ġupgr": 5484, + "Ġshape": 5485, + "Ġspeaking": 5486, + "ansion": 5487, + "ao": 5488, + "ĠSn": 5489, + "Ġworry": 5490, + "ĠBritain": 5491, + "Please": 5492, + "roud": 5493, + "Ġhun": 5494, + "Ġintroduced": 5495, + "Ġdiet": 5496, + "Ind": 5497, + "ĠSecond": 5498, + "Ġfunctions": 5499, + "uts": 5500, + "ĠEach": 5501, + "ĠJeff": 5502, + "Ġstress": 5503, + "Ġaccounts": 5504, + "Ġguarant": 5505, + "ĠAnn": 5506, + "edia": 5507, + "Ġhonest": 5508, + "Ġtree": 5509, + "ĠAfrican": 5510, + "ĠBush": 5511, + "},": 5512, + "Ġsch": 5513, + "ĠOnly": 5514, + "Ġfif": 5515, + "igan": 5516, + "Ġexercise": 5517, + "ĠExp": 5518, + "Ġscientists": 5519, + "Ġlegislation": 5520, + "ĠWork": 5521, + "ĠSpr": 5522, + "ÃĤ": 5523, + "ĠHuman": 5524, + "Ġè": 5525, + "Ġsurvey": 5526, + "Ġrich": 5527, + "rip": 5528, + "Ġmaintain": 5529, + "Ġflo": 5530, + "Ġleadership": 5531, + "stream": 5532, + "ĠIslamic": 5533, + "Ġ01": 5534, + "ĠCollege": 5535, + "Ġmagic": 5536, + "ĠPrime": 5537, + "Ġfigures": 5538, + "2017": 5539, + "inder": 5540, + "xual": 5541, + "ĠDead": 5542, + "Ġabsolutely": 5543, + "Ġfourth": 5544, + "Ġpresented": 5545, + "respond": 5546, + "rible": 5547, + "Ġalcohol": 5548, + "ato": 5549, + "ĠDE": 5550, + "porary": 5551, + "Ġgrab": 5552, + "Ġvari": 5553, + "Ġquant": 5554, + "ĠPhoto": 5555, + "Ġplus": 5556, + "rick": 5557, + "arks": 5558, + "Ġalternative": 5559, + "Ġpil": 5560, + "Ġapprox": 5561, + "that": 5562, + "Ġobjects": 5563, + "ĠRo": 5564, + "ĠAndroid": 5565, + "Ġsignificantly": 5566, + "ĠRoad": 5567, + "kay": 5568, + "Read": 5569, + "avor": 5570, + "Ġacknow": 5571, + "ĠHD": 5572, + "ĠSing": 5573, + "Or": 5574, + "ĠMont": 5575, + "Ġuns": 5576, + "prof": 5577, + "Ġnegoti": 5578, + 
"ĠArch": 5579, + "iki": 5580, + "Ġtelevision": 5581, + "ĠJewish": 5582, + "Ġcommittee": 5583, + "Ġmotor": 5584, + "Ġappearance": 5585, + "Ġsitting": 5586, + "Ġstrike": 5587, + "ĠDown": 5588, + "comp": 5589, + "ĠHist": 5590, + "Ġfold": 5591, + "acement": 5592, + "ĠLouis": 5593, + "Ġbelong": 5594, + "ĠâĢ¢": 5595, + "Ġmort": 5596, + "Ġprepared": 5597, + "Ġ64": 5598, + "ĠMaster": 5599, + "Ġindeed": 5600, + "ĠDen": 5601, + "Ġrent": 5602, + "TA": 5603, + "ourney": 5604, + "arc": 5605, + "Su": 5606, + "97": 5607, + "Ġadvice": 5608, + "Ġchanging": 5609, + "Ġlisted": 5610, + "Ġlaunched": 5611, + "isation": 5612, + "ĠPeter": 5613, + "ishes": 5614, + "Ġlived": 5615, + "ĠMel": 5616, + "ĠSupreme": 5617, + "ĠFederal": 5618, + "Ġ);": 5619, + "ructure": 5620, + "Ġsets": 5621, + "Ġphilos": 5622, + "uous": 5623, + "ĠÂł": 5624, + "Ġapplied": 5625, + "ĠNOT": 5626, + "Ġhousing": 5627, + "ĠMount": 5628, + "Ġodd": 5629, + "Ġsust": 5630, + "DA": 5631, + "fficient": 5632, + "Ġ?": 5633, + "olved": 5634, + "Ġpowers": 5635, + "Ġthr": 5636, + "Ġremaining": 5637, + "ĠWater": 5638, + "LC": 5639, + "Ġcauses": 5640, + "ãģ®": 5641, + "Ġmanner": 5642, + "ads": 5643, + "Ġsuggests": 5644, + "Ġends": 5645, + "standing": 5646, + "fig": 5647, + "ĠDun": 5648, + "idth": 5649, + "Ġgay": 5650, + "Ġtermin": 5651, + "ĠAngeles": 5652, + "MS": 5653, + "Ġscientific": 5654, + "Ġcoal": 5655, + "apers": 5656, + "bar": 5657, + "ĠThomas": 5658, + "Ġsym": 5659, + "ĠRun": 5660, + "this": 5661, + "PC": 5662, + "igrants": 5663, + "Ġminute": 5664, + "ĠDistrict": 5665, + "cellent": 5666, + "Ġleaves": 5667, + "Ġcompleted": 5668, + "amin": 5669, + "Ġfocused": 5670, + "Ġmonitor": 5671, + "Ġvehicles": 5672, + "MA": 5673, + "ĠMass": 5674, + "ĠGrand": 5675, + "Ġaffected": 5676, + "itutional": 5677, + "Ġconstruct": 5678, + "Ġfollows": 5679, + "Ġton": 5680, + "reens": 5681, + "Ġhomes": 5682, + "ĠExt": 5683, + "ĠLevel": 5684, + "rast": 5685, + "ĠIr": 5686, + "Ġelim": 5687, + "Ġlargely": 5688, + "ĠJoe": 5689, + "Ġvotes": 5690, + "alls": 5691, + "Ġbusinesses": 5692, + "ĠFoundation": 5693, + "ĠCentral": 5694, + "Ġyards": 5695, + "Ġmaterials": 5696, + "ulner": 5697, + "Ġguide": 5698, + "Ġcloser": 5699, + "ums": 5700, + "Ġsports": 5701, + "eder": 5702, + "Just": 5703, + "Ġtaxes": 5704, + "84": 5705, + "ĠOld": 5706, + "Ġdecade": 5707, + "ola": 5708, + "Ġvir": 5709, + "Ġdropped": 5710, + "Ġdelay": 5711, + "itect": 5712, + "Ġsecure": 5713, + "stein": 5714, + "level": 5715, + "Ġtreated": 5716, + "Ġfiled": 5717, + "aine": 5718, + "Ġvan": 5719, + "Ġmir": 5720, + "Ġcolumn": 5721, + "icted": 5722, + "eper": 5723, + "Ġrot": 5724, + "Ġconsult": 5725, + "Ġentry": 5726, + "Ġmarijuana": 5727, + "ĠDou": 5728, + "Ġapparently": 5729, + "oking": 5730, + "clusive": 5731, + "Ġincreases": 5732, + "ano": 5733, + "Ġspecifically": 5734, + "Ġtele": 5735, + "ensions": 5736, + "Ġreligion": 5737, + "abilities": 5738, + "Ġframe": 5739, + "ĠNote": 5740, + "ĠLee": 5741, + "Ġhelping": 5742, + "Ġedge": 5743, + "oston": 5744, + "Ġorganizations": 5745, + "Ãĥ": 5746, + "ĠBoth": 5747, + "hips": 5748, + "Ġbigger": 5749, + "Ġboost": 5750, + "ĠStand": 5751, + "Ġrow": 5752, + "uls": 5753, + "abase": 5754, + "Ġrid": 5755, + "Let": 5756, + "aren": 5757, + "rave": 5758, + "Ġstret": 5759, + "PD": 5760, + "Ġvision": 5761, + "Ġwearing": 5762, + "Ġappreci": 5763, + "Ġaward": 5764, + "ĠUse": 5765, + "Ġfactor": 5766, + "war": 5767, + "ulations": 5768, + ")(": 5769, + "Ġgod": 5770, + "Ġterrit": 5771, + "Ġparam": 5772, + "asts": 5773, + "87": 5774, + "Ġenemies": 5775, + "ĠGames": 5776, + "FF": 5777, + 
"Ġaccident": 5778, + "Well": 5779, + "ĠMartin": 5780, + "TER": 5781, + "Ġath": 5782, + "ĠHell": 5783, + "Ġforg": 5784, + "Ġveter": 5785, + "ĠMedic": 5786, + "free": 5787, + "Ġstars": 5788, + "Ġexpensive": 5789, + "Ġacad": 5790, + "rawn": 5791, + "ĠWhe": 5792, + "Ġlock": 5793, + "Ġformat": 5794, + "Ġsoldiers": 5795, + "sm": 5796, + "Ġagent": 5797, + "Ġresponsibility": 5798, + "ora": 5799, + "ĠScience": 5800, + "Ġrapid": 5801, + "Ġtough": 5802, + "ĠJesus": 5803, + "Ġbelieves": 5804, + "ML": 5805, + "Ġwear": 5806, + "lete": 5807, + "ÃĥÃĤ": 5808, + "ĠDri": 5809, + "Ġcommission": 5810, + "ĠBob": 5811, + "Oh": 5812, + "aped": 5813, + "Ġwarm": 5814, + "ÃĥÃĤÃĥÃĤ": 5815, + "Ġ2003": 5816, + "ortion": 5817, + "Ġhasn": 5818, + "uster": 5819, + "Ġunivers": 5820, + "ĠIll": 5821, + "Ġking": 5822, + "ologies": 5823, + "94": 5824, + "ĠTem": 5825, + "ĠMos": 5826, + "Ġpatient": 5827, + "ĠMexico": 5828, + "cean": 5829, + "ĠDeath": 5830, + "ĠSanders": 5831, + "you": 5832, + "ĠCast": 5833, + "ĠCompany": 5834, + "pty": 5835, + "Ġhappening": 5836, + "FP": 5837, + "ĠBattle": 5838, + "Ġbought": 5839, + "Am": 5840, + "Mod": 5841, + "Us": 5842, + "uters": 5843, + "ĠCre": 5844, + "ĠThose": 5845, + "Ġ44": 5846, + "iser": 5847, + "Ġsoul": 5848, + "ĠTop": 5849, + "ĠHarry": 5850, + "ĠAw": 5851, + "Ġseat": 5852, + "ffee": 5853, + "Ġrevolution": 5854, + "Ġ(\"": 5855, + "ĠDuring": 5856, + "ette": 5857, + "Ġring": 5858, + "Ġoffensive": 5859, + "Ġreturns": 5860, + "Ġvideos": 5861, + "Ġdiscl": 5862, + "Ġfamous": 5863, + "enced": 5864, + "ĠSign": 5865, + "ĠRiver": 5866, + "Ġ300": 5867, + "PM": 5868, + "ĠBus": 5869, + "ĠCH": 5870, + "Ġcandidates": 5871, + "arden": 5872, + "Ġpercentage": 5873, + "Ġvisual": 5874, + "Ġthank": 5875, + "Ġtrouble": 5876, + "nergy": 5877, + "Ġ2001": 5878, + "Ġprove": 5879, + "ashion": 5880, + "Ġenh": 5881, + "ĠLong": 5882, + "UM": 5883, + "Ġconnected": 5884, + "Ġpossibility": 5885, + "Over": 5886, + "Ġexpert": 5887, + "Ġlibrary": 5888, + "arts": 5889, + "ĠDirector": 5890, + "Ġfellow": 5891, + "92": 5892, + "irty": 5893, + "Ġdry": 5894, + "Ġsigns": 5895, + "ĠLove": 5896, + "Ġquiet": 5897, + "foot": 5898, + "Ġpure": 5899, + "ĠHun": 5900, + "Ġfilled": 5901, + "phas": 5902, + "ĠElect": 5903, + "endment": 5904, + "ĠExpl": 5905, + "Ġunable": 5906, + "ns": 5907, + "mo": 5908, + "Ġvast": 5909, + "obe": 5910, + "Ġidentify": 5911, + "apping": 5912, + "ĠCarolina": 5913, + "gress": 5914, + "Ġprote": 5915, + "Ġfish": 5916, + "Ġcircumstances": 5917, + "razy": 5918, + "ĠPhot": 5919, + "Ġbodies": 5920, + "ĠMur": 5921, + "Ġdeveloping": 5922, + "ĠAR": 5923, + "Ġexperienced": 5924, + "Ġsubstant": 5925, + "ĠBoard": 5926, + "esome": 5927, + "Ġdomestic": 5928, + "Ġcombined": 5929, + "ĠPut": 5930, + "Ġchemical": 5931, + "ĠChild": 5932, + "Ġpool": 5933, + "ĠCy": 5934, + "Ġegg": 5935, + "cons": 5936, + "sters": 5937, + "Ġhurt": 5938, + "Ġmarkets": 5939, + "Ġconservative": 5940, + "Ġsupporters": 5941, + "Ġagencies": 5942, + "idel": 5943, + "Ob": 5944, + "urb": 5945, + "Ġ43": 5946, + "ĠDefense": 5947, + "ye": 5948, + "ĠAp": 5949, + "dule": 5950, + "Ġtemperature": 5951, + "Ġconducted": 5952, + "ĠChief": 5953, + "Ġpulled": 5954, + "Ġfol": 5955, + "Last": 5956, + "onto": 5957, + "osis": 5958, + "VER": 5959, + "Des": 5960, + "ĠPan": 5961, + "First": 5962, + "Ġadvance": 5963, + "Ġlicense": 5964, + "rors": 5965, + "ĠJon": 5966, + "Ġimagine": 5967, + "Ġhell": 5968, + "Ġfixed": 5969, + "Ġincor": 5970, + "osite": 5971, + "ĠLog": 5972, + "icken": 5973, + "]:": 5974, + "Ġsurprise": 5975, + "hab": 5976, + "Ġcraft": 5977, + "olt": 5978, + 
"ĠJul": 5979, + "Ġdial": 5980, + "Ġrelevant": 5981, + "Ġentered": 5982, + "Ġleads": 5983, + "ĠAD": 5984, + "ĠClean": 5985, + "Ġpictures": 5986, + "essor": 5987, + "Ġalt": 5988, + "Ġpaying": 5989, + "Per": 5990, + "ĠMarket": 5991, + "Ġupdates": 5992, + "amily": 5993, + "ĠType": 5994, + "ĠHome": 5995, + "Ġ55": 5996, + "sembly": 5997, + "rome": 5998, + "83": 5999, + "Ġgreatest": 6000, + "Ġheight": 6001, + "Ġheav": 6002, + "aints": 6003, + "Ġlisten": 6004, + "aser": 6005, + "ĠSH": 6006, + "Ġcapable": 6007, + "acle": 6008, + "Ġperspect": 6009, + "inating": 6010, + "Ġoffering": 6011, + "rypt": 6012, + "ĠDevelop": 6013, + "abin": 6014, + "rc": 6015, + "Ġbright": 6016, + "alty": 6017, + "arrow": 6018, + "Ġsuppl": 6019, + "inding": 6020, + "acked": 6021, + "gypt": 6022, + "ĠAnother": 6023, + "pg": 6024, + "ĠVirginia": 6025, + "ĠLu": 6026, + "Ġplanned": 6027, + "Ġpit": 6028, + "Ġsweet": 6029, + "Type": 6030, + "ĠDi": 6031, + "Ġtypically": 6032, + "ĠFrancisco": 6033, + "Ġprospect": 6034, + "ĠDan": 6035, + "Ġteen": 6036, + "rees": 6037, + "Ġsched": 6038, + "Ġhol": 6039, + "Ġscr": 6040, + "Ġlots": 6041, + "life": 6042, + "Ġnewsp": 6043, + "Ġforget": 6044, + "ĠNone": 6045, + "ĠMiddle": 6046, + "ĠRyan": 6047, + "edd": 6048, + "Ġsevere": 6049, + "Ġsuit": 6050, + "ller": 6051, + "93": 6052, + "Ġcorrespond": 6053, + "Ġexplos": 6054, + "uations": 6055, + "Ġflag": 6056, + "game": 6057, + "rid": 6058, + "Ġprin": 6059, + "ĠData": 6060, + "Ġdeploy": 6061, + "ĠEnter": 6062, + "suit": 6063, + "ghan": 6064, + "ĠMen": 6065, + "Ġthoughts": 6066, + "Ġmatters": 6067, + "Ġadapt": 6068, + "ĠAri": 6069, + "Ġfill": 6070, + "Ġforth": 6071, + "Ġsam": 6072, + "Ġ41": 6073, + "Ġpayment": 6074, + "ĠHor": 6075, + "Ġspring": 6076, + "duc": 6077, + "Ġlosing": 6078, + "Ġbringing": 6079, + "FO": 6080, + "ala": 6081, + "Ġdistribution": 6082, + "hered": 6083, + "bour": 6084, + "ĠIsraeli": 6085, + "oma": 6086, + "Ġcombination": 6087, + "Ġplenty": 6088, + "VE": 6089, + "Can": 6090, + "ĠHaw": 6091, + "Ġperman": 6092, + "ĠSpecial": 6093, + "Ġtow": 6094, + "Ġseeking": 6095, + "Ġexamples": 6096, + "Ġclasses": 6097, + "cr": 6098, + "Ġbeer": 6099, + "Ġmoves": 6100, + "ĠIP": 6101, + "ĠKn": 6102, + "Ġpanel": 6103, + "Even": 6104, + "Ġproperly": 6105, + "Ġris": 6106, + "Ġplug": 6107, + "Ġestimated": 6108, + "Every": 6109, + "Ġdefensive": 6110, + "agraph": 6111, + "Ġpregn": 6112, + "Ġinstit": 6113, + "ĠVict": 6114, + "Ġvolume": 6115, + "Ġpositions": 6116, + "Ġlinks": 6117, + "ĠProgram": 6118, + "ĠWeek": 6119, + "agues": 6120, + "Ġtransform": 6121, + "ker": 6122, + "ĠCEO": 6123, + "Ġcas": 6124, + "Ġopponent": 6125, + "Ġtweet": 6126, + "ĠCode": 6127, + "Ġshop": 6128, + "Ġfly": 6129, + "Ġtalks": 6130, + "Ġbag": 6131, + "Phone": 6132, + "Ġaid": 6133, + "Ġplants": 6134, + "Ġ65": 6135, + "Ġattorney": 6136, + "arters": 6137, + "quest": 6138, + "ĠMagic": 6139, + "Ġbegins": 6140, + "Ġmyster": 6141, + "Ġenvironmental": 6142, + "Ġstorage": 6143, + "NN": 6144, + "Ġmarg": 6145, + "Ġske": 6146, + "Ġmetal": 6147, + "elly": 6148, + "Ġordered": 6149, + "Ġremained": 6150, + "Ġloved": 6151, + "Ġprompt": 6152, + "Ġupdated": 6153, + "Ġexperts": 6154, + "Ġwalking": 6155, + "Ġancient": 6156, + "Ġperformed": 6157, + "ATE": 6158, + "Ġneither": 6159, + "iency": 6160, + "Ġmanufacture": 6161, + "ĠPak": 6162, + "Ġselected": 6163, + "Ġmine": 6164, + "Ġultimately": 6165, + "Ġexplan": 6166, + "Ġlabel": 6167, + "ĠServices": 6168, + "ributed": 6169, + "Trump": 6170, + "Ġsyn": 6171, + "ĠUlt": 6172, + "SC": 6173, + "Ġmeat": 6174, + "Ġgiant": 6175, + "ĠWars": 6176, + "ĠON": 6177, + 
"Ġadm": 6178, + "Ġinterpret": 6179, + "Ġevening": 6180, + "Ġevil": 6181, + "ĠBoston": 6182, + "ĠWild": 6183, + "ĠÃ": 6184, + "ĠBitcoin": 6185, + "ĠAmazon": 6186, + "Dr": 6187, + "ĠInformation": 6188, + "Ġobviously": 6189, + "Ġadvanced": 6190, + "Photo": 6191, + "olar": 6192, + "Ġweather": 6193, + "Ġsymbol": 6194, + "Ġsole": 6195, + "Ġpotentially": 6196, + "oster": 6197, + "Ġoriginally": 6198, + "mun": 6199, + "300": 6200, + "aze": 6201, + "essions": 6202, + "Ġdeck": 6203, + "Ġstood": 6204, + "Ġyouth": 6205, + "ĠBern": 6206, + "Rep": 6207, + "ĠTest": 6208, + "Ġbasically": 6209, + "otic": 6210, + "Ġinvolve": 6211, + "olit": 6212, + "lyn": 6213, + "See": 6214, + "Ġaircraft": 6215, + "Ġconfirm": 6216, + "EW": 6217, + "Ġmessages": 6218, + "ĠRichard": 6219, + "Ġkit": 6220, + "Ġprohib": 6221, + "Ġvulner": 6222, + "isters": 6223, + "Ġexistence": 6224, + "Ġturning": 6225, + "ĠSP": 6226, + "Ġdesire": 6227, + "Ġflat": 6228, + "Ġment": 6229, + "season": 6230, + "anges": 6231, + "Ġneighborhood": 6232, + "ĠLake": 6233, + "ATION": 6234, + "Ġpointed": 6235, + "bur": 6236, + "Ġinnov": 6237, + "ucks": 6238, + "UL": 6239, + "Ġprofessor": 6240, + "Ġexpressed": 6241, + "AB": 6242, + "icious": 6243, + "Ġ2002": 6244, + "ĠDev": 6245, + "Ġsession": 6246, + "Ġbare": 6247, + "sen": 6248, + "Ġdiss": 6249, + "ĠCath": 6250, + "ĠPass": 6251, + "ĠPoint": 6252, + "Ġdoctor": 6253, + "orrow": 6254, + "ailed": 6255, + "ĠRub": 6256, + "ĠDC": 6257, + "ĠCharl": 6258, + "person": 6259, + "Ġwriter": 6260, + "ighters": 6261, + "ureau": 6262, + "Ġoblig": 6263, + "Ġrecorded": 6264, + "Ġbroke": 6265, + "Ġorders": 6266, + "ilty": 6267, + "Ġmotion": 6268, + "inity": 6269, + "law": 6270, + "adium": 6271, + "Ġimmigration": 6272, + "Ġcontrast": 6273, + "Ġbatt": 6274, + "Ġexcellent": 6275, + "Ġtechnical": 6276, + "ami": 6277, + "Ġtun": 6278, + "Ġcloud": 6279, + "ĠYear": 6280, + "geon": 6281, + "Ġcreation": 6282, + "Ġstrange": 6283, + "Ġauth": 6284, + "Ġfort": 6285, + "born": 6286, + "Ġextent": 6287, + "ĠToday": 6288, + "ĠClub": 6289, + "Ġrain": 6290, + "Ġsample": 6291, + "Ġaccepted": 6292, + "Ġtact": 6293, + "Ġfired": 6294, + "ĠSon": 6295, + "Ġstands": 6296, + "Ġboot": 6297, + "Ġ47": 6298, + "Ġstatements": 6299, + "Ġversions": 6300, + "Ġselling": 6301, + "ounded": 6302, + "Ġ1990": 6303, + "Ġweren": 6304, + "ĠWatch": 6305, + "Ġexperiment": 6306, + "Post": 6307, + "Ġretail": 6308, + "uled": 6309, + "Inst": 6310, + "unte": 6311, + "ãĥ¼": 6312, + "Ġdepart": 6313, + "Ġbond": 6314, + "ivery": 6315, + "ompl": 6316, + "Ġreaction": 6317, + "ĠSyrian": 6318, + "ĠPac": 6319, + "apped": 6320, + "aniel": 6321, + "DP": 6322, + "Ġresolution": 6323, + "Ġreact": 6324, + "Ġapproved": 6325, + "onom": 6326, + "mond": 6327, + "ĠOffic": 6328, + "---": 6329, + "Ġreplace": 6330, + "Ġtack": 6331, + "Ġsport": 6332, + "Ġchain": 6333, + "Ġemergency": 6334, + "rad": 6335, + "ĠPalestin": 6336, + "Ġ46": 6337, + "Ġautomatically": 6338, + "Ġroute": 6339, + "Ġpal": 6340, + "Ġbanks": 6341, + "ĠParis": 6342, + "ĠMedia": 6343, + "road": 6344, + "icing": 6345, + "ixt": 6346, + "isted": 6347, + "Ġgrew": 6348, + "Ġcoord": 6349, + "ĠWhere": 6350, + "omin": 6351, + "Ġsubs": 6352, + "��": 6353, + "Ġ±": 6354, + "Ġcorporate": 6355, + "Ġselection": 6356, + "noon": 6357, + "ĠReport": 6358, + "cs": 6359, + "cluding": 6360, + "orders": 6361, + "anche": 6362, + "ĠIts": 6363, + "Ġslowly": 6364, + "ĠEgypt": 6365, + "ĠAcc": 6366, + "Ġcolle": 6367, + "iques": 6368, + "EX": 6369, + "Ġattempts": 6370, + "url": 6371, + "ĠCross": 6372, + "Ġfindings": 6373, + "ĠSC": 6374, + "ĠOR": 6375, + "Ġindex": 
6376, + "ensity": 6377, + "ĠWay": 6378, + "ĠLand": 6379, + "Ġshock": 6380, + "dis": 6381, + "Ġdynam": 6382, + "Ġcart": 6383, + "mosp": 6384, + "Since": 6385, + "iest": 6386, + "ĠBoy": 6387, + "Ġstorm": 6388, + "ĠContin": 6389, + "2013": 6390, + "hew": 6391, + "ilit": 6392, + "Ġessential": 6393, + "iquid": 6394, + "Other": 6395, + "ivered": 6396, + "Ġreasonable": 6397, + "Act": 6398, + "Ġsubsequ": 6399, + "ĠPack": 6400, + "ĠFort": 6401, + "Ġconsidering": 6402, + "Ġuniversity": 6403, + "log": 6404, + "Ġmarried": 6405, + "Ġillust": 6406, + "ĠTrue": 6407, + "£ı": 6408, + "Ġnumerous": 6409, + "rastructure": 6410, + "Ġseriously": 6411, + "Ġreferred": 6412, + "ua": 6413, + "Ġconsistent": 6414, + "onna": 6415, + "ĠReal": 6416, + "ruption": 6417, + "ciples": 6418, + "Ġfacts": 6419, + "91": 6420, + "otes": 6421, + "erg": 6422, + "Then": 6423, + "Ġaccompl": 6424, + "Note": 6425, + "Ġrevenue": 6426, + "Ġpassing": 6427, + "Ġmal": 6428, + "een": 6429, + "ĠYet": 6430, + "Ġgather": 6431, + "terday": 6432, + "ework": 6433, + "ĠAuthor": 6434, + "Pe": 6435, + "Ġoptim": 6436, + "Ġrub": 6437, + "Ġè£ı": 6438, + "Ġunknown": 6439, + "stone": 6440, + "Ġunion": 6441, + "olve": 6442, + "Ġopportunities": 6443, + "Ġbrowser": 6444, + "ĠWal": 6445, + "ĠCost": 6446, + "Ġreporting": 6447, + "sts": 6448, + "pet": 6449, + "Ġsand": 6450, + "Ġsuddenly": 6451, + "Ġsurprising": 6452, + "ĠVR": 6453, + "Ġsomewhat": 6454, + "ĠBas": 6455, + "ulture": 6456, + "izz": 6457, + "ĠCD": 6458, + "Ġchallenges": 6459, + "Ġsettings": 6460, + "Ġexperiences": 6461, + "ĠFull": 6462, + "Ġcann": 6463, + "Ġreceiving": 6464, + "EST": 6465, + "Ġjoint": 6466, + "Ġcultural": 6467, + "Ġast": 6468, + "82": 6469, + "astern": 6470, + "ceived": 6471, + "ĠCru": 6472, + "Ġbull": 6473, + "pired": 6474, + "amm": 6475, + "Ġfacing": 6476, + "power": 6477, + "Ġboss": 6478, + "ĠHol": 6479, + "Ġinstr": 6480, + "Ġincreasingly": 6481, + "Ġshift": 6482, + "Ġstreets": 6483, + "ĠWilliams": 6484, + "abb": 6485, + "Ġlie": 6486, + "Ġlaugh": 6487, + "ĠCa": 6488, + "PL": 6489, + "Ġadults": 6490, + "Ġcustomer": 6491, + "Ġobtained": 6492, + "Ġsupporting": 6493, + "html": 6494, + "fire": 6495, + "Ġdetailed": 6496, + "Ġpicked": 6497, + "ĠRight": 6498, + "lder": 6499, + "EE": 6500, + "stood": 6501, + "ĠKim": 6502, + "Ġwire": 6503, + "Ġsight": 6504, + "Ġdevelopers": 6505, + "Ġpersons": 6506, + "Ġsad": 6507, + "Ġcup": 6508, + "Ġwarning": 6509, + "Ġboys": 6510, + "long": 6511, + "Ġbird": 6512, + "fo": 6513, + "Ġwal": 6514, + "Ġobserved": 6515, + "Ġzone": 6516, + "iveness": 6517, + "Ġchannel": 6518, + "cript": 6519, + "Ġrefused": 6520, + "ĠAgain": 6521, + "Ġsuc": 6522, + "Ġspokesman": 6523, + "ĠRef": 6524, + "rite": 6525, + "ouston": 6526, + "ãĥ³": 6527, + "ĠSher": 6528, + "Ġacts": 6529, + "ĠName": 6530, + "Ġstruggle": 6531, + "arry": 6532, + "ometimes": 6533, + "Ġdiscrim": 6534, + "HT": 6535, + "Ġcategory": 6536, + "Ġrealize": 6537, + "Ġemployee": 6538, + "ĠAfghan": 6539, + "enger": 6540, + "Ġguns": 6541, + "ĠSteve": 6542, + "ĠMot": 6543, + "ĠOl": 6544, + "oked": 6545, + "Ġthick": 6546, + "Ġfairly": 6547, + "illy": 6548, + "Ġsurve": 6549, + "ĠMat": 6550, + "weight": 6551, + "âĶ": 6552, + "Ġtroops": 6553, + "Ġagents": 6554, + "Ġbattery": 6555, + "Ġmotiv": 6556, + "á": 6557, + "Sec": 6558, + "den": 6559, + "overy": 6560, + "LS": 6561, + "Ġflu": 6562, + "Ġconfident": 6563, + "ĠOper": 6564, + "Ġempty": 6565, + "Ġphen": 6566, + "Ġsector": 6567, + "Ġexcited": 6568, + "Ġremote": 6569, + "aph": 6570, + "oen": 6571, + "Ġdestroyed": 6572, + "Ġmoral": 6573, + "ĠHP": 6574, + "ĠRon": 6575, + 
"Ġdress": 6576, + "ĠBat": 6577, + "Ġlit": 6578, + "ĠMS": 6579, + "Ġaf": 6580, + "HL": 6581, + "rum": 6582, + "isms": 6583, + "Ġshouldn": 6584, + "Ġsympt": 6585, + "ĠToronto": 6586, + "hetic": 6587, + "Ġcarbon": 6588, + "Ġinstalled": 6589, + "Ġviolent": 6590, + "Ġsolar": 6591, + "ja": 6592, + "Ġpractices": 6593, + "Ġride": 6594, + "ĠPenn": 6595, + "Ġimproved": 6596, + "Ġaudio": 6597, + "Ġbehavi": 6598, + "ĠPS": 6599, + "Ġeating": 6600, + "Data": 6601, + "ĠReview": 6602, + "pass": 6603, + "claim": 6604, + "uated": 6605, + "angers": 6606, + "chen": 6607, + "Ġproperties": 6608, + "Ġanywhere": 6609, + "Another": 6610, + "Ġblow": 6611, + "ĠJackson": 6612, + "Ġproud": 6613, + "Ġplane": 6614, + "lines": 6615, + "Ġsquare": 6616, + "Ġproof": 6617, + "ansas": 6618, + "Ġtalked": 6619, + "makers": 6620, + "Ġsister": 6621, + "Ġholds": 6622, + "Ġresident": 6623, + "Ġ==": 6624, + "Ġresistance": 6625, + "Ġsplit": 6626, + "Ġprosecut": 6627, + "Ġconfidence": 6628, + "resents": 6629, + "Ġcuts": 6630, + "Ġexception": 6631, + "Ġzero": 6632, + "Getty": 6633, + "Ġcopyright": 6634, + "Ġtotally": 6635, + "ormal": 6636, + "ifications": 6637, + "ĠAustralian": 6638, + "Ġsick": 6639, + "Ġ150": 6640, + "Ġhousehold": 6641, + "Ġfees": 6642, + "Ġdrivers": 6643, + "ogen": 6644, + "ĠNY": 6645, + "Ġnecessarily": 6646, + "Ġregulations": 6647, + "earing": 6648, + "sl": 6649, + "Ġperspective": 6650, + "care": 6651, + "icial": 6652, + "His": 6653, + "Ġescape": 6654, + "Ġsurprised": 6655, + "ĠVan": 6656, + "urrent": 6657, + "Ġvac": 6658, + "81": 6659, + "ĠThus": 6660, + "Ġemphas": 6661, + "ĠChampions": 6662, + "ĠIce": 6663, + "Ġnarr": 6664, + "Ġheads": 6665, + "Ġcausing": 6666, + "bel": 6667, + "fortunately": 6668, + "ĠMa": 6669, + "Ġtargets": 6670, + "cipl": 6671, + "Ġafternoon": 6672, + "Ġadds": 6673, + "ĠMaybe": 6674, + "ĠFour": 6675, + "essed": 6676, + "plete": 6677, + "Ġusual": 6678, + "cho": 6679, + "ingu": 6680, + "Ġwithd": 6681, + "ĠEnergy": 6682, + "ĠEconom": 6683, + "OO": 6684, + "Ġarticles": 6685, + "Ġinjured": 6686, + "Ġmanage": 6687, + "Ġexplains": 6688, + "Ġdiagn": 6689, + "Rec": 6690, + "atures": 6691, + "Ġlinked": 6692, + "Ġdiscussed": 6693, + "Ġexplo": 6694, + "Ġoccasion": 6695, + "athan": 6696, + "Ġopposite": 6697, + "Ġfaces": 6698, + "Ġdenied": 6699, + "ĠKnight": 6700, + "Ġnut": 6701, + "Ġapproximately": 6702, + "Ġdisappoint": 6703, + "onymous": 6704, + "ĠBest": 6705, + "ĠLo": 6706, + "ĠHy": 6707, + "ĠAff": 6708, + "Ġvoting": 6709, + "anwhile": 6710, + "ĠIII": 6711, + "Ġinstitutions": 6712, + "agram": 6713, + "ĠDaily": 6714, + "Ġdrag": 6715, + "Ġnearby": 6716, + "Ġguilty": 6717, + "Ġconver": 6718, + "Pre": 6719, + "ship": 6720, + "Ġreward": 6721, + "Ġphilosoph": 6722, + "ĠSS": 6723, + "ugh": 6724, + "Ġapps": 6725, + "friend": 6726, + "Ġupper": 6727, + "Ġadvert": 6728, + "Ġsnow": 6729, + "Ġfrust": 6730, + "Ġourselves": 6731, + "Fr": 6732, + "ĠDie": 6733, + "ampion": 6734, + "Ġdismiss": 6735, + "Ġcere": 6736, + "Ġsignal": 6737, + "from": 6738, + "Ġ).": 6739, + "Ġ52": 6740, + "Ġcrimes": 6741, + "itors": 6742, + "estival": 6743, + "useum": 6744, + "Ġcouncil": 6745, + "ĠSaud": 6746, + "May": 6747, + "ĠGun": 6748, + "ician": 6749, + "ether": 6750, + "Ġsufficient": 6751, + "ĠHen": 6752, + "sole": 6753, + "Ġhistorical": 6754, + "ĠFar": 6755, + "ĠTurn": 6756, + "Ġpin": 6757, + "Ġsucceed": 6758, + "mat": 6759, + "lymp": 6760, + "Ġtradition": 6761, + "ĠOk": 6762, + "Ġcro": 6763, + "Ġdescription": 6764, + "alle": 6765, + "Ġsky": 6766, + "Te": 6767, + "Ġwidely": 6768, + "Ġwave": 6769, + "Ġdefinition": 6770, + "ĠJews": 6771, 
+ "Ġcycle": 6772, + "Ġrefere": 6773, + "Ġbrings": 6774, + "usal": 6775, + "Ġalive": 6776, + "Ġfrequently": 6777, + "Ġintention": 6778, + "ĠControl": 6779, + "lv": 6780, + "ystem": 6781, + "Ġprivacy": 6782, + "gent": 6783, + "rence": 6784, + "ĠQuest": 6785, + "ĠChristmas": 6786, + "Ġrail": 6787, + "Ġcooper": 6788, + "Ġtested": 6789, + "ĠCapt": 6790, + "asks": 6791, + "Ġcomfortable": 6792, + "Ġdelivered": 6793, + "scape": 6794, + "Ġdepth": 6795, + "ĠGOP": 6796, + "Ġwrites": 6797, + "Ġassets": 6798, + "Ġsav": 6799, + "iments": 6800, + "Ġtransition": 6801, + "Ġartist": 6802, + "ĠLook": 6803, + "Ġlob": 6804, + "Ġcomponents": 6805, + "arity": 6806, + "Ġwalked": 6807, + "Ġroot": 6808, + "Ġparticipants": 6809, + "Ġnoticed": 6810, + "Ġresc": 6811, + "Ġnav": 6812, + "ĠAdminist": 6813, + "da": 6814, + "utral": 6815, + "plate": 6816, + "Ġimportance": 6817, + "Ġassert": 6818, + "iously": 6819, + "cription": 6820, + "Ġinjuries": 6821, + "ĠCheck": 6822, + "Ġregistered": 6823, + "Ġintent": 6824, + "Ġmissed": 6825, + "ographic": 6826, + "Ġsentence": 6827, + "ounter": 6828, + "Ġassistance": 6829, + "evin": 6830, + "Ġdatabase": 6831, + "Ġbuildings": 6832, + "Ġclassic": 6833, + "Ġthinks": 6834, + "ĠOhio": 6835, + "Pr": 6836, + "ugg": 6837, + "Ġfee": 6838, + "pan": 6839, + "Ġeffectively": 6840, + "Ġfacility": 6841, + "Ġbear": 6842, + "Ġchapter": 6843, + "Ġdogs": 6844, + "ĠColumb": 6845, + "Ġlatter": 6846, + "itial": 6847, + "Ġadmitted": 6848, + "TV": 6849, + "ĠGeorg": 6850, + "Ġposts": 6851, + "\\\\": 6852, + "Ġlawyer": 6853, + "Ġequival": 6854, + "Ġmand": 6855, + "Ġcontrolled": 6856, + "ĠWalk": 6857, + "ĠAndrew": 6858, + "Ġmenu": 6859, + "amental": 6860, + "Ġprotected": 6861, + "va": 6862, + "Ġadministr": 6863, + "oral": 6864, + "Ġrein": 6865, + "ĠSar": 6866, + "Ġamounts": 6867, + "Ġnative": 6868, + "ĠMoon": 6869, + "Ġrepresents": 6870, + "Ġabandon": 6871, + "Ġcarrying": 6872, + "Ġtank": 6873, + "mary": 6874, + "Ġdeclared": 6875, + "Tube": 6876, + "Ġhat": 6877, + "Ġpunish": 6878, + "ellect": 6879, + "mes": 6880, + "Ġuniverse": 6881, + "ĠRod": 6882, + "phy": 6883, + "Ġinfrastructure": 6884, + "Ġ51": 6885, + "Ġopposed": 6886, + "ownt": 6887, + "ca": 6888, + "ĠMake": 6889, + "Ġhardware": 6890, + "Ġcoffee": 6891, + "Rel": 6892, + "bal": 6893, + "world": 6894, + "ĠSaf": 6895, + "ĠSea": 6896, + "inals": 6897, + "Ġowned": 6898, + "Ġhall": 6899, + "ersion": 6900, + "Ġdescribe": 6901, + "ĠPot": 6902, + "Ġportion": 6903, + "Ġatmosp": 6904, + "Ġgovernments": 6905, + "Ġdepending": 6906, + "Ġoffense": 6907, + "Ġtrick": 6908, + "awa": 6909, + "ĠLine": 6910, + "ĠVis": 6911, + "ĠHard": 6912, + "ĠOrig": 6913, + "ĠClick": 6914, + "Ġdesk": 6915, + "ĠValley": 6916, + "ĠSov": 6917, + "Ġmovies": 6918, + "Ġremark": 6919, + "Ġmail": 6920, + "Ġconscious": 6921, + "Ġruling": 6922, + "ĠRights": 6923, + "Ġmedic": 6924, + "hent": 6925, + "ĠWomen": 6926, + "><": 6927, + "Ġreplaced": 6928, + "ĠPrem": 6929, + "ĠThanks": 6930, + "Ġrenew": 6931, + "ĠBall": 6932, + "iform": 6933, + "Ġshots": 6934, + "Comm": 6935, + "Ġarmed": 6936, + "Ġconstant": 6937, + "Ġtaste": 6938, + "Ġrealized": 6939, + "Ġbuff": 6940, + "Ġmo": 6941, + "Ġefficient": 6942, + "Most": 6943, + "oration": 6944, + "ifies": 6945, + "Ġcommunication": 6946, + "Ġflood": 6947, + "Ġconsequences": 6948, + "Ġanyway": 6949, + "igg": 6950, + "ĠGM": 6951, + "ĠThank": 6952, + "Ġiron": 6953, + "Ġevolution": 6954, + "ĠCop": 6955, + "twitter": 6956, + "Ġ95": 6957, + "Ġrelationships": 6958, + "adel": 6959, + "ĠYoung": 6960, + "Ġproposal": 6961, + "ayers": 6962, + "uilding": 6963, + "ĠHot": 
6964, + "ORE": 6965, + "cos": 6966, + "Ġcollabor": 6967, + "PG": 6968, + "axy": 6969, + "Ġknowing": 6970, + "Ġsupports": 6971, + "owed": 6972, + "Ġcontrols": 6973, + "Ġmerely": 6974, + "umer": 6975, + "Ġathlet": 6976, + "Ġfashion": 6977, + "path": 6978, + "Ġgift": 6979, + "Ġera": 6980, + "AND": 6981, + "Ġkinds": 6982, + "ĠKorean": 6983, + "Ġlegit": 6984, + "ulous": 6985, + "Ġessentially": 6986, + "Ġtherap": 6987, + "nic": 6988, + "Ġsuffered": 6989, + "Ġhur": 6990, + "Ġpromise": 6991, + "Ġexcess": 6992, + "Ġoverw": 6993, + "Ġprime": 6994, + "ĠHouston": 6995, + "erry": 6996, + "ĠMs": 6997, + "RS": 6998, + "2012": 6999, + "Ġstores": 7000, + "ĠOlymp": 7001, + "Ġjourney": 7002, + "Although": 7003, + "Sub": 7004, + "ĠEduc": 7005, + "ĠChapter": 7006, + "Ġrequests": 7007, + "Ġconsumers": 7008, + "Ġtiny": 7009, + "Ġisol": 7010, + "ĠFair": 7011, + "ba": 7012, + "ĠYOU": 7013, + "Ġcrash": 7014, + "celer": 7015, + "Ġemotional": 7016, + "Ġgoods": 7017, + "Ġelected": 7018, + "Ġmoder": 7019, + "ĠLinux": 7020, + "Ġblocks": 7021, + "Ġisland": 7022, + "ĠSociety": 7023, + "Ġelections": 7024, + "Ġbroadcast": 7025, + "Ġcheap": 7026, + "Ġnations": 7027, + "Ġseasons": 7028, + "400": 7029, + "Ġwaste": 7030, + "ĠSat": 7031, + "Ġfields": 7032, + "employ": 7033, + "Ġprofile": 7034, + "Ġauthors": 7035, + "ALL": 7036, + "ĠGra": 7037, + "west": 7038, + "ĠTy": 7039, + "Ġdeaths": 7040, + "Ġvacc": 7041, + "Ġformed": 7042, + "Ġdu": 7043, + "Ġongoing": 7044, + "ĠMuslims": 7045, + "elf": 7046, + "igure": 7047, + "Ġassume": 7048, + "ĠUkraine": 7049, + "water": 7050, + "Ġcoast": 7051, + "Ġvoted": 7052, + "gor": 7053, + "ĠAS": 7054, + "ĠMichigan": 7055, + "aza": 7056, + "ĠArm": 7057, + "iro": 7058, + "Ġflex": 7059, + "asters": 7060, + "''": 7061, + "Ġwelcome": 7062, + "arl": 7063, + "Ġlocations": 7064, + "igation": 7065, + "ĠFil": 7066, + "Ġbuying": 7067, + "Ġarchitect": 7068, + "Ġharder": 7069, + "ĠCub": 7070, + "Ġinterface": 7071, + "Ġrestaurant": 7072, + "Ġdiscover": 7073, + "Ġexceed": 7074, + "Ġfavour": 7075, + "gery": 7076, + "Ġduty": 7077, + "Ġpitch": 7078, + "ador": 7079, + "ĠMach": 7080, + "boy": 7081, + "Ġresponded": 7082, + "Ġextended": 7083, + "hers": 7084, + "Many": 7085, + "raid": 7086, + "ifer": 7087, + "ĠIns": 7088, + "Ser": 7089, + "Ġmedium": 7090, + "she": 7091, + "ĠSports": 7092, + "Ġmagazine": 7093, + "utation": 7094, + "Ġlimits": 7095, + "ĠGall": 7096, + "Ġexternal": 7097, + "razil": 7098, + "Ġyounger": 7099, + "tle": 7100, + "Ġremind": 7101, + "ĠCON": 7102, + "Ġimmediate": 7103, + "Ġhidden": 7104, + "Ġvolunte": 7105, + "Ġsimpl": 7106, + "odcast": 7107, + "Ġphase": 7108, + "dr": 7109, + "Ġplot": 7110, + "Ġexposure": 7111, + "RI": 7112, + "ograp": 7113, + "vin": 7114, + "anish": 7115, + "ĠAcad": 7116, + "ĠEngine": 7117, + "Ġexpansion": 7118, + "ĠPay": 7119, + "Your": 7120, + "Ġpushed": 7121, + "ĠEll": 7122, + "ĠHead": 7123, + "Ġmarketing": 7124, + "ĠAC": 7125, + "ket": 7126, + "Ġhits": 7127, + "Ġgro": 7128, + "ĠAge": 7129, + "ĠScot": 7130, + "][": 7131, + "Ġstim": 7132, + "ĠiPhone": 7133, + "ĪĴ": 7134, + "Ġnarrow": 7135, + "ĠGetty": 7136, + "ĠTurkey": 7137, + "Ġperfectly": 7138, + "Ġenable": 7139, + "utch": 7140, + "Ġprecise": 7141, + "Ġregime": 7142, + "Ġshif": 7143, + "Ġcompens": 7144, + "gun": 7145, + "div": 7146, + "Ġchosen": 7147, + "ĠKen": 7148, + "Any": 7149, + "Ġtrees": 7150, + "Ġrecommended": 7151, + "ĠRen": 7152, + "uable": 7153, + "ĠHT": 7154, + "Follow": 7155, + "EG": 7156, + "ĠHand": 7157, + "ĠKenn": 7158, + "Ġarguments": 7159, + "Ġexists": 7160, + "Ġbike": 7161, + "ĠConserv": 7162, + "Ġbreaking": 
7163, + "ĠGar": 7164, + "Ġcrazy": 7165, + "Ġvirtual": 7166, + "aylor": 7167, + "ixel": 7168, + "Ġ1980": 7169, + "Ġpermission": 7170, + "ĠSeries": 7171, + "Ġconsumer": 7172, + "Ġclosely": 7173, + "called": 7174, + "Ġ54": 7175, + "Ġhopes": 7176, + "Ġarray": 7177, + "ĠWin": 7178, + "ĠLabour": 7179, + "Ġspons": 7180, + "ĠIre": 7181, + "Ġpow": 7182, + "Ġreaders": 7183, + "Ġemployment": 7184, + "Ġcreature": 7185, + "Ġresulting": 7186, + "Ġaccurate": 7187, + "Ġmoments": 7188, + "Ġargued": 7189, + "Ġped": 7190, + "During": 7191, + "Ġ53": 7192, + "ĠTal": 7193, + "Ġsought": 7194, + "Ġsuffering": 7195, + "Ġicon": 7196, + "lee": 7197, + "Ġ($": 7198, + "alian": 7199, + "°": 7200, + "Ġpra": 7201, + "Ġbonus": 7202, + "(\"": 7203, + "ko": 7204, + "Ġacting": 7205, + "DE": 7206, + "fall": 7207, + "Ġcomparison": 7208, + "Ġsmooth": 7209, + "ĠNAS": 7210, + "upp": 7211, + "ĠJoseph": 7212, + "eping": 7213, + "ĠTake": 7214, + "ĠMid": 7215, + "Ġsending": 7216, + "fast": 7217, + "ĠFall": 7218, + "Ġdealing": 7219, + "user": 7220, + "ĠOrgan": 7221, + "Co": 7222, + "Ġattached": 7223, + "Ġsees": 7224, + "%.": 7225, + "Ġtypical": 7226, + "ART": 7227, + "Ġfinds": 7228, + "ĠAsia": 7229, + "umin": 7230, + "ĠCore": 7231, + "ĠEnt": 7232, + "inent": 7233, + "uce": 7234, + "ĠBlood": 7235, + "ĠNever": 7236, + "Ġemails": 7237, + "Ġhighlight": 7238, + "Ġconfront": 7239, + "atus": 7240, + "uted": 7241, + "Ġunus": 7242, + "Ġtopic": 7243, + "ĠAdam": 7244, + "Ġble": 7245, + "ati": 7246, + "Ġunderstood": 7247, + "Set": 7248, + "struct": 7249, + "TP": 7250, + "Ġmob": 7251, + "aa": 7252, + "ĠStart": 7253, + "pected": 7254, + "sell": 7255, + "Ġdedicated": 7256, + "ĠCA": 7257, + "uan": 7258, + "Ġsongs": 7259, + "escription": 7260, + "Ġtech": 7261, + "Ġrape": 7262, + "Ġaside": 7263, + "Ġgrant": 7264, + "Ġ56": 7265, + "sub": 7266, + "Ġargue": 7267, + "Ġcontaining": 7268, + "Ġschedule": 7269, + "Ġliberal": 7270, + "Ġpublicly": 7271, + "Ġheavily": 7272, + "ĠUt": 7273, + "iner": 7274, + "ĠSection": 7275, + "ĠCare": 7276, + "weet": 7277, + "ls": 7278, + "Dis": 7279, + "âĶĢ": 7280, + "ĠFollow": 7281, + "Back": 7282, + "ĠIT": 7283, + "Ġbes": 7284, + "ji": 7285, + "ĠHit": 7286, + "ested": 7287, + "Ġeverybody": 7288, + "ĠSwed": 7289, + "Ġfemin": 7290, + "Ġfacilities": 7291, + "Ġconven": 7292, + "Comp": 7293, + "ĠOS": 7294, + "core": 7295, + "Ġanx": 7296, + "Ġdivision": 7297, + "ĠCam": 7298, + "ĠStan": 7299, + "mates": 7300, + "Ġexplore": 7301, + "plom": 7302, + "Ġshares": 7303, + "pload": 7304, + "anes": 7305, + "Ġideal": 7306, + "eters": 7307, + "ĠBase": 7308, + "Ġplastic": 7309, + "Ġdistinct": 7310, + "ĠNetwork": 7311, + "ĠSeattle": 7312, + "Ġtrading": 7313, + "ensus": 7314, + "intend": 7315, + "Ġexhib": 7316, + "Ġinitially": 7317, + "ĠFood": 7318, + "Ġthousand": 7319, + "ĠBusiness": 7320, + "acter": 7321, + "Ġparagraph": 7322, + "Ġroughly": 7323, + "Ġwww": 7324, + "Ġcreative": 7325, + "ĠConf": 7326, + "Ġconsumption": 7327, + "Ġfilms": 7328, + "agan": 7329, + "Ġobtain": 7330, + "Ġtall": 7331, + "Ġtor": 7332, + "Ġacknowled": 7333, + "Ġgrown": 7334, + "alo": 7335, + "KE": 7336, + "Ġ400": 7337, + "enders": 7338, + "taining": 7339, + "UG": 7340, + "Ġsuicide": 7341, + "Ġwatched": 7342, + "ĠList": 7343, + "ali": 7344, + "rehens": 7345, + "Ġsurrounding": 7346, + "Ġpip": 7347, + "Ġflying": 7348, + "ĠJava": 7349, + "ordan": 7350, + "Ġserving": 7351, + "inations": 7352, + "post": 7353, + "Ġsho": 7354, + "Av": 7355, + "Ġjail": 7356, + "zy": 7357, + "Ġ1999": 7358, + "Ġ>": 9609, + "orous": 9610, + "Ġfirms": 9611, + "screen": 9612, + "una": 9613, + 
"Ġembarrass": 9614, + "ulse": 9615, + "Ġletting": 9616, + "Ġthrew": 9617, + "iley": 9618, + "Ġchannels": 9619, + "lan": 9620, + "ĠVegas": 9621, + "Ġsear": 9622, + "Ġfantastic": 9623, + "arre": 9624, + "uzzle": 9625, + "ĠDer": 9626, + "Those": 9627, + "Ġswing": 9628, + "Ġsheet": 9629, + "index": 9630, + "cover": 9631, + "ogan": 9632, + "Ġvariables": 9633, + "ĠTech": 9634, + "Ġspoken": 9635, + "achel": 9636, + "ĠDa": 9637, + "ĠMountain": 9638, + "Ġloaded": 9639, + "Ġfootage": 9640, + "version": 9641, + "Ġunl": 9642, + "ĠPhoenix": 9643, + "Ġthrowing": 9644, + "Ġfiring": 9645, + "Ġtracking": 9646, + "Ġwidth": 9647, + "Ġstruggling": 9648, + "rooms": 9649, + "otion": 9650, + "Ġmonthly": 9651, + "ĠServer": 9652, + "Ġeggs": 9653, + "open": 9654, + "MC": 9655, + "Ġ1993": 9656, + "Ġhired": 9657, + "Ġstayed": 9658, + "ĠAllen": 9659, + "Ġstro": 9660, + "Ġ98": 9661, + "step": 9662, + "ĠTurkish": 9663, + "Ġfabric": 9664, + "isting": 9665, + "ĠDom": 9666, + "Ġdates": 9667, + "Ġpron": 9668, + "Ġbasketball": 9669, + "Ġlucky": 9670, + "ĠArabia": 9671, + "Ġassumed": 9672, + "esty": 9673, + "Ġaffairs": 9674, + "Ġglad": 9675, + "ĠIndeed": 9676, + "ĠFA": 9677, + "ĠWord": 9678, + "Ġjoining": 9679, + "ifice": 9680, + "pread": 9681, + "irts": 9682, + "ĠSelect": 9683, + "Ġpopulations": 9684, + "aware": 9685, + "Ġnose": 9686, + "Ġcomplaints": 9687, + "start": 9688, + "Ġscoring": 9689, + "Thanks": 9690, + "Ġmining": 9691, + "Ġvisitors": 9692, + "SH": 9693, + "Ġdamaged": 9694, + "Ġcharacteristics": 9695, + "ĠPent": 9696, + "DC": 9697, + "Ġ83": 9698, + "ĠSix": 9699, + "rates": 9700, + "Ġflags": 9701, + "ĠBrew": 9702, + "dog": 9703, + "Mark": 9704, + "////": 9705, + "Ġexecution": 9706, + "Ġjoke": 9707, + "phones": 9708, + "Ġtestimony": 9709, + "Ġobst": 9710, + "QL": 9711, + "ĠCut": 9712, + "Ġstudied": 9713, + "ĠNintendo": 9714, + "icket": 9715, + "ĠNBC": 9716, + "Ġlad": 9717, + "ĠBra": 9718, + "ĠMoh": 9719, + "Ġkernel": 9720, + "Ġoverwhelming": 9721, + "Ġaged": 9722, + "Ġapplicable": 9723, + "ĠCond": 9724, + "Ġroads": 9725, + "ĠBlock": 9726, + "made": 9727, + "odge": 9728, + "Ġcommands": 9729, + "Ġoffices": 9730, + "veland": 9731, + "Ġtut": 9732, + "Ġreceiver": 9733, + "ĠFro": 9734, + "Ġshopping": 9735, + "ĠiP": 9736, + "ĠStre": 9737, + "ĠABC": 9738, + "Ġentertainment": 9739, + "ĠBow": 9740, + "orted": 9741, + "Mc": 9742, + "Ġreads": 9743, + "grad": 9744, + "ĠCollect": 9745, + "ĠâĪĴ": 9746, + "ĠCapital": 9747, + "ederation": 9748, + "Ġemployer": 9749, + "Ġinvolvement": 9750, + "Ġanxiety": 9751, + "alia": 9752, + "Ġroof": 9753, + "ĠAmong": 9754, + "ĠDemocrat": 9755, + "Ġstats": 9756, + "ĠVill": 9757, + "Ġconstitutional": 9758, + "Ġreferring": 9759, + "itty": 9760, + "Ġtackle": 9761, + "outube": 9762, + "Ġbacked": 9763, + "ĠHong": 9764, + "ĠBroad": 9765, + "Ġele": 9766, + "ĠOtt": 9767, + "Ġ1992": 9768, + "hour": 9769, + "achusetts": 9770, + "Cal": 9771, + "Ġdefeated": 9772, + "Ġ81": 9773, + "esp": 9774, + "Ġseemingly": 9775, + "was": 9776, + "ĠJenn": 9777, + "ĠKurd": 9778, + "Ġgene": 9779, + "Ġdiscount": 9780, + "Ret": 9781, + "ECT": 9782, + "();": 9783, + "Ġclubs": 9784, + "Ġsid": 9785, + "ĠMarsh": 9786, + "Check": 9787, + "Ġpp": 9788, + "ĠEag": 9789, + "idespread": 9790, + "Ġbeings": 9791, + "FT": 9792, + "Ġintroduction": 9793, + "ĠChange": 9794, + "ARD": 9795, + "Ġ110": 9796, + "adows": 9797, + "ierce": 9798, + "Ġmeal": 9799, + "author": 9800, + "ĠBang": 9801, + "lahoma": 9802, + "Ġranks": 9803, + "2011": 9804, + "????": 9805, + "max": 9806, + "Ġcollapse": 9807, + "Ġopens": 9808, + "Ġecho": 9809, + "Ġsoph": 9810, + 
"Ġracist": 9811, + "Ġenormous": 9812, + "Ġwaves": 9813, + "Ġtap": 9814, + "Ġcomprehensive": 9815, + ".--": 9816, + "ĠRoy": 9817, + "Ġfarmers": 9818, + "Related": 9819, + "aired": 9820, + "rones": 9821, + "ĠCrim": 9822, + "Ġproportion": 9823, + "Ġdesigns": 9824, + "Ġnegotiations": 9825, + "Ġvirtually": 9826, + "ĠBatman": 9827, + "Ġwarn": 9828, + "Ġlegitimate": 9829, + "mate": 9830, + "Ġconvention": 9831, + ",,": 9832, + "netic": 9833, + "ĠSD": 9834, + "Ġconsistently": 9835, + "Ġcompensation": 9836, + "Ġpunishment": 9837, + "Ġye": 9838, + "Ġtie": 9839, + "ĠBureau": 9840, + "irlf": 9841, + "ĠBu": 9842, + "ĠAren": 9843, + "ĠPhilipp": 9844, + "Ġknife": 9845, + "Ġmemories": 9846, + "ĠRoss": 9847, + "Ġangle": 9848, + "Ġ86": 9849, + "ĠThunder": 9850, + "Ġrend": 9851, + "ĠTour": 9852, + "Ġcounts": 9853, + "sung": 9854, + "ĠImp": 9855, + "Ġeducational": 9856, + "Ġaccessible": 9857, + "COM": 9858, + "Ġdrew": 9859, + "yer": 9860, + "Gl": 9861, + "amine": 9862, + "ORT": 9863, + "OB": 9864, + "IB": 9865, + "master": 9866, + "Ġtrials": 9867, + "ogy": 9868, + "har": 9869, + "ĠTrust": 9870, + "Ġpreferred": 9871, + "irlfriend": 9872, + "ĠNev": 9873, + "Ġbin": 9874, + "Ġcow": 9875, + "Page": 9876, + "Ġsignature": 9877, + "ĠBL": 9878, + "700": 9879, + "Ġretired": 9880, + "Ġbytes": 9881, + "Ġneighb": 9882, + "ĠLegend": 9883, + "Ġdevast": 9884, + "Ġsuspected": 9885, + "isons": 9886, + "ĠPokémon": 9887, + "scale": 9888, + "Ġcapabilities": 9889, + "Ġrevel": 9890, + "Ġcheese": 9891, + "dy": 9892, + "igrant": 9893, + "Ġfailing": 9894, + "bits": 9895, + "ĠHeroes": 9896, + "ĠGhost": 9897, + "ĠScient": 9898, + "Ġappointed": 9899, + "uri": 9900, + "Ġinstitution": 9901, + "Ġexpanded": 9902, + "greg": 9903, + "Ġmonitoring": 9904, + "Ġpodcast": 9905, + "Ġcoalition": 9906, + "Ġ96": 9907, + "Jo": 9908, + "Ġstolen": 9909, + "ĠSab": 9910, + "Ġstops": 9911, + "Ġholiday": 9912, + "Ġintr": 9913, + "Car": 9914, + "Black": 9915, + "ĠLGBT": 9916, + "Ġwarming": 9917, + "ĠAnderson": 9918, + "Ġ89": 9919, + "Ġproducer": 9920, + "Med": 9921, + "Ġaccuracy": 9922, + "ĠMarvel": 9923, + "izabeth": 9924, + "ĠPatrick": 9925, + "mony": 9926, + "Ġmini": 9927, + "acles": 9928, + "Ġovert": 9929, + "they": 9930, + "Ġmembership": 9931, + "ĠVen": 9932, + "Ġexch": 9933, + "Ġremoval": 9934, + "ĠDave": 9935, + "TY": 9936, + "mad": 9937, + "ĠFind": 9938, + "Ġadequ": 9939, + "Ġec": 9940, + "Ġteeth": 9941, + "Ġemotion": 9942, + "Ġperm": 9943, + "Ġsolely": 9944, + "db": 9945, + "Ġextraord": 9946, + "IGHT": 9947, + "cal": 9948, + "Ġguidelines": 9949, + "Ġdying": 9950, + "Ġsuspended": 9951, + "ĠPremier": 9952, + "ĠAnthony": 9953, + "elve": 9954, + "Ġdad": 9955, + "ĠEth": 9956, + "ĠFootball": 9957, + "Ġabandoned": 9958, + "Ġ<<": 9959, + "Ġmarch": 9960, + "Ġhorror": 9961, + "âĢ¦\"": 9962, + "Ġchildhood": 9963, + "Ġcampaigns": 9964, + "Ġlunch": 9965, + "ĠAlbert": 9966, + "block": 9967, + "âĸĪâĸĪ": 9968, + "ounding": 9969, + "Ġbone": 9970, + "organ": 9971, + "aders": 9972, + "ĠFlash": 9973, + "ĠDrive": 9974, + "Ġtonight": 9975, + "Ġwars": 9976, + "ĠFL": 9977, + "Ġformation": 9978, + "const": 9979, + "News": 9980, + "Ġcompe": 9981, + "orious": 9982, + "ĠStaff": 9983, + "Ġdiscussions": 9984, + "ĠProtection": 9985, + "ĠJam": 9986, + "Ġcriteria": 9987, + "Ġinstallation": 9988, + "Ġaccomplish": 9989, + "izza": 9990, + "Ġpublisher": 9991, + "Ġrescue": 9992, + "ĠTry": 9993, + "ULL": 9994, + "ĠSom": 9995, + "ĠHop": 9996, + "oret": 9997, + "ths": 9998, + "ordon": 9999, + "Ġpocket": 10000, + "ĠInv": 10001, + "Download": 10002, + "ĠCrime": 10003, + "Ġbene": 10004, + 
"ĠGuide": 10005, + "ĠAssembly": 10006, + "Ġparameters": 10007, + "IE": 10008, + "ĠAlexander": 10009, + "Ġconcert": 10010, + "ĠSche": 10011, + "Ġshoes": 10012, + "Ġvisiting": 10013, + "Ġrecall": 10014, + "Ġbub": 10015, + "Ġrural": 10016, + "Ġconcrete": 10017, + "ĠRos": 10018, + "Next": 10019, + "Russ": 10020, + "Ġloans": 10021, + "ĠShield": 10022, + "Ġtrem": 10023, + "hemat": 10024, + "kg": 10025, + "ĠHarris": 10026, + "isition": 10027, + "ĠMove": 10028, + "ĠFC": 10029, + "Ġfate": 10030, + "ĠCho": 10031, + "Ġtired": 10032, + "Ġprincipal": 10033, + "hist": 10034, + "iences": 10035, + "athy": 10036, + "Ġsevent": 10037, + "Ġmood": 10038, + "Ġstrategic": 10039, + "Ġdiseases": 10040, + "Ġforum": 10041, + "Ġtempor": 10042, + "Ġheadquarters": 10043, + "Par": 10044, + "ige": 10045, + "flix": 10046, + "Ġguitar": 10047, + "Ġ94": 10048, + "Only": 10049, + "Ġreleases": 10050, + "roph": 10051, + "================================": 10052, + "Ġ600": 10053, + "ĠContinue": 10054, + "igate": 10055, + "ĠCrit": 10056, + "system": 10057, + "Ġdisabled": 10058, + "Ġunexpected": 10059, + "ithub": 10060, + "Ġunclear": 10061, + "ĠEst": 10062, + "Ġcontrad": 10063, + "Ġstrategies": 10064, + "ventures": 10065, + "Ġpassage": 10066, + "AME": 10067, + "Ġimproving": 10068, + "Ġreveals": 10069, + "Ġdecrease": 10070, + "ova": 10071, + "Ġannoy": 10072, + "ĠShort": 10073, + "ĠLibrary": 10074, + "Ġcyber": 10075, + "nell": 10076, + "ĠHur": 10077, + "ĠCB": 10078, + "Ġphotograp": 10079, + "UI": 10080, + "Ġsed": 10081, + "Ge": 10082, + "Ġ87": 10083, + "Ġdiverse": 10084, + "Ġencouraged": 10085, + "Ġconspiracy": 10086, + "Ġbirds": 10087, + "Ġoperator": 10088, + "Ġhandful": 10089, + "Ġclassified": 10090, + "?)": 10091, + "Ġdramatic": 10092, + "Ġinvestigators": 10093, + "ito": 10094, + "Ġwidespread": 10095, + "ĠRoom": 10096, + "----------------------------------------------------------------": 10097, + "Ġcollective": 10098, + "Ġjournalist": 10099, + "String": 10100, + "Ġtemperatures": 10101, + "ila": 10102, + "Ġguid": 10103, + "Ġinspect": 10104, + "Ġmissile": 10105, + "ĠMayor": 10106, + "Ġmanual": 10107, + "Ġsimultane": 10108, + "Ġratings": 10109, + "Ġsuck": 10110, + "Ġ97": 10111, + "Ġuniversal": 10112, + "Ġpharm": 10113, + "Ġdisrupt": 10114, + "iano": 10115, + "AV": 10116, + "Ġft": 10117, + "Ġstatist": 10118, + "olds": 10119, + "ĠWalker": 10120, + "php": 10121, + "Ġundert": 10122, + "ĠLas": 10123, + "ishop": 10124, + "ntil": 10125, + "reshold": 10126, + "ĠWhether": 10127, + "Ms": 10128, + "Ġdeny": 10129, + "ĠCloud": 10130, + "Ġprovider": 10131, + "Ġsurviv": 10132, + "ĠUpdate": 10133, + "has": 10134, + "Ġmistakes": 10135, + "charge": 10136, + "pled": 10137, + "rity": 10138, + "Ġnode": 10139, + "ĠMassachusetts": 10140, + "ools": 10141, + "lication": 10142, + "Ġfails": 10143, + "emale": 10144, + "ori": 10145, + "backs": 10146, + "Ġshirt": 10147, + "Ġ''": 10148, + "ĠNAT": 10149, + "Ġwaters": 10150, + "elson": 10151, + "Ġease": 10152, + "Ġscar": 10153, + "Ġcontents": 10154, + "mind": 10155, + "Ġcontribution": 10156, + "Ġshr": 10157, + "Ġhanded": 10158, + "Ġstability": 10159, + "Ġtrave": 10160, + "Em": 10161, + "Ġmirror": 10162, + "123": 10163, + "Ġweigh": 10164, + "Ġfiction": 10165, + "ouver": 10166, + "istant": 10167, + "rition": 10168, + "ĠFed": 10169, + "Ġphysically": 10170, + "Ġstake": 10171, + "ĠArticle": 10172, + "ĠArc": 10173, + "ĠLewis": 10174, + "ĠMind": 10175, + "Ġdemonstrate": 10176, + "Ġprofits": 10177, + "vision": 10178, + "omic": 10179, + "olid": 10180, + "Ġbattles": 10181, + "Ġdrives": 10182, + "Ġeastern": 10183, + "ĠSony": 
10184, + "!!!": 10185, + "aration": 10186, + "vard": 10187, + "ĠGL": 10188, + "portation": 10189, + "Ġ92": 10190, + "Ġlawmakers": 10191, + "Ġprotecting": 10192, + "ĠEPA": 10193, + "Ġyeah": 10194, + "Ġshame": 10195, + "olph": 10196, + "even": 10197, + "xit": 10198, + "Ġattach": 10199, + "Ġrepresenting": 10200, + "Ġobs": 10201, + "ĠUtah": 10202, + "iffs": 10203, + "ĠFreedom": 10204, + "ó": 10205, + "AK": 10206, + "Ġincidents": 10207, + "itage": 10208, + "Ġviewers": 10209, + "cd": 10210, + "Ġmouse": 10211, + "Ġclar": 10212, + "Ġaccordance": 10213, + "Ġbot": 10214, + "cor": 10215, + "ĠSummer": 10216, + "held": 10217, + "Ġinnocent": 10218, + "Ġinitiative": 10219, + "ols": 10220, + "________________________________": 10221, + "Ġspots": 10222, + "pace": 10223, + "Ġconventional": 10224, + "Ġcorporations": 10225, + "Ġblocked": 10226, + "HD": 10227, + "attered": 10228, + "Ġrefers": 10229, + "Ġbuck": 10230, + "ĠDigital": 10231, + "120": 10232, + "Ġtopics": 10233, + "TF": 10234, + "Äģ": 10235, + "brid": 10236, + "reement": 10237, + "Ġunderlying": 10238, + "ĠMember": 10239, + "Ġinvestigating": 10240, + "Ġpregnancy": 10241, + "Ġtouchdown": 10242, + "ĠBand": 10243, + "ĠCaller": 10244, + "Ġinstances": 10245, + "PP": 10246, + "wa": 10247, + "Good": 10248, + "Ġ1991": 10249, + "ĠCold": 10250, + "Ġfears": 10251, + "Ġremarks": 10252, + "ĨĴ": 10253, + "atal": 10254, + "Ġmit": 10255, + "Ġexperiments": 10256, + "ipt": 10257, + "Color": 10258, + "indu": 10259, + "Update": 10260, + "Ġ93": 10261, + "Ag": 10262, + "Ġå": 10263, + "ancouver": 10264, + "Both": 10265, + "Ġjudges": 10266, + "Object": 10267, + "Ġstere": 10268, + "umbn": 10269, + "Ġparticipation": 10270, + "ĠStars": 10271, + "ĠJere": 10272, + "Ġweekly": 10273, + "ĠBan": 10274, + "Ġconversations": 10275, + "ĠPitt": 10276, + "uz": 10277, + "ĠIndiana": 10278, + "ĠKick": 10279, + "Ġinfection": 10280, + "Ġheroes": 10281, + "Ġsettled": 10282, + "Ġstrip": 10283, + "Ġhal": 10284, + "Ġdump": 10285, + "ĠSci": 10286, + "Ġles": 10287, + "Ġreferences": 10288, + "ĠURL": 10289, + "ĠBridge": 10290, + "Ġwanting": 10291, + "Force": 10292, + "Ġexclus": 10293, + "Meanwhile": 10294, + "mn": 10295, + "Ġgentle": 10296, + "maker": 10297, + "senal": 10298, + "ĠGro": 10299, + "ouri": 10300, + "ĠRain": 10301, + "ĠAlliance": 10302, + "Ġlift": 10303, + "ela": 10304, + "SD": 10305, + "ĠCleveland": 10306, + "Ġranked": 10307, + "Ġstadium": 10308, + "Ġdeadly": 10309, + "ä¸": 10310, + "Ġriding": 10311, + "aria": 10312, + "ĠArmor": 10313, + "Ġdocumentation": 10314, + "ĠGreece": 10315, + "reek": 10316, + "Ġlens": 10317, + "ĠSa": 10318, + "Ġgross": 10319, + "ĠEmer": 10320, + "agers": 10321, + "ĠDub": 10322, + "ĠRh": 10323, + "ĠAMD": 10324, + "Ġarrival": 10325, + "Ġdesert": 10326, + "Ġsupplement": 10327, + "ĠResp": 10328, + "Ġknee": 10329, + "Ġmargin": 10330, + "font": 10331, + "ogg": 10332, + "2010": 10333, + "ĠPir": 10334, + "ĠProm": 10335, + "ivals": 10336, + "Ġintake": 10337, + "Ġdifferently": 10338, + "ugs": 10339, + "Ġbits": 10340, + "cluded": 10341, + "Ġsearching": 10342, + "ĠDu": 10343, + "umble": 10344, + "Ġfunctional": 10345, + "ĠBaltimore": 10346, + "ĠCould": 10347, + "Ġdesired": 10348, + "Ġcircuit": 10349, + "ĠLyn": 10350, + "ĠGO": 10351, + "ĠFalse": 10352, + "repre": 10353, + "':": 10354, + "alties": 10355, + "Ġminim": 10356, + "Ġdrove": 10357, + "ĠShould": 10358, + "Ġhip": 10359, + "Ġpros": 10360, + "Ġutility": 10361, + "ĠNature": 10362, + "ĠMode": 10363, + "President": 10364, + "opp": 10365, + "rat": 10366, + "formance": 10367, + "Ġconcentration": 10368, + "Ġfont": 10369, + 
"ĠBud": 10370, + "Ġamid": 10371, + "Ġrevers": 10372, + "ĠML": 10373, + "Bar": 10374, + "Ġinteraction": 10375, + "Ġjurisd": 10376, + "Ġspells": 10377, + "dep": 10378, + "fil": 10379, + "Ġcivilians": 10380, + "utter": 10381, + "ĠCooper": 10382, + "ĠBelow": 10383, + "Ġentrance": 10384, + "Ġconvert": 10385, + "Ġcontroversy": 10386, + "owered": 10387, + "Ġcontrary": 10388, + "Ġarc": 10389, + "ĠExecutive": 10390, + "ĠOfficer": 10391, + "Ġpackages": 10392, + "Ġprogressive": 10393, + "width": 10394, + "Ġreserved": 10395, + "vol": 10396, + "ĠSamsung": 10397, + "Ġprinted": 10398, + "Ġcenters": 10399, + "Ġintroduce": 10400, + "ĠKennedy": 10401, + "Ġodds": 10402, + "Ġsurely": 10403, + "Ġindependence": 10404, + "Ġpassengers": 10405, + "reprene": 10406, + "ĠBeh": 10407, + "Ġloves": 10408, + "ĠESPN": 10409, + "Ġfacilit": 10410, + "Ġidentical": 10411, + "Ġdoct": 10412, + "Ġpartnership": 10413, + "conf": 10414, + "ĠHide": 10415, + "Ġconfused": 10416, + "ĠCow": 10417, + "Men": 10418, + "Ġwrest": 10419, + "ĠIraqi": 10420, + "Ġholes": 10421, + "ĠStudies": 10422, + "Ġpregnant": 10423, + "hard": 10424, + "Ġsignals": 10425, + "IX": 10426, + "Ġpulling": 10427, + "Ġgraduate": 10428, + "Ġnominee": 10429, + "Date": 10430, + "Ġpermitted": 10431, + "ĠâĤ¬": 10432, + "ĠOklahoma": 10433, + "Start": 10434, + "Ġauthorized": 10435, + "Ġalarm": 10436, + "ĠCos": 10437, + "van": 10438, + "Ġgenerations": 10439, + "cular": 10440, + "Ġdragon": 10441, + "ĠSoftware": 10442, + "ĠEdward": 10443, + "Ġcontroller": 10444, + "Sen": 10445, + "gered": 10446, + "ĠVik": 10447, + "Ġapproached": 10448, + "Thank": 10449, + "Ġcance": 10450, + "Ġformula": 10451, + "ĠSmall": 10452, + "Ġweakness": 10453, + "Ġramp": 10454, + "itudes": 10455, + "jud": 10456, + "Ġbrilliant": 10457, + "Ġaccus": 10458, + "source": 10459, + "Ġ800": 10460, + "ĠEvil": 10461, + "Sw": 10462, + "Ġhomeless": 10463, + "week": 10464, + "iens": 10465, + "rics": 10466, + "ĠThird": 10467, + "TO": 10468, + "Ġorganic": 10469, + "Ġpresentation": 10470, + "agh": 10471, + "ĠDownload": 10472, + "vation": 10473, + "Ġassembly": 10474, + "orable": 10475, + "holders": 10476, + "ĠBernie": 10477, + "ĠHelp": 10478, + "Ġtong": 10479, + "ĠFight": 10480, + "Ġbeach": 10481, + "Book": 10482, + "ĠLic": 10483, + "Ġrush": 10484, + "ĠRound": 10485, + "oup": 10486, + "ĠMarx": 10487, + "Ġcalculated": 10488, + "ĠDevil": 10489, + "ĠSarah": 10490, + "Ġoccasionally": 10491, + "Ġbullet": 10492, + "Available": 10493, + "gate": 10494, + "Ġ91": 10495, + "Ġhosp": 10496, + "Ġpromises": 10497, + "ĠHIV": 10498, + "ĠStadium": 10499, + "ĠStock": 10500, + "ĠCorporation": 10501, + "gage": 10502, + "NG": 10503, + "ĠCredit": 10504, + "Ġsne": 10505, + "ibl": 10506, + "Ġaccum": 10507, + "such": 10508, + "Ġterrorists": 10509, + "Ġconsciousness": 10510, + "ĠZh": 10511, + "Ġdrama": 10512, + "oola": 10513, + "piration": 10514, + "Ġlabour": 10515, + "ĠNin": 10516, + "Ġutter": 10517, + "Ġdemocratic": 10518, + "Ġassass": 10519, + "ilation": 10520, + "Ġgest": 10521, + "Ġabroad": 10522, + "Ġmetab": 10523, + "Ġsorts": 10524, + "Ġflav": 10525, + "UB": 10526, + "Ġmg": 10527, + "ĠNothing": 10528, + "ĠOd": 10529, + "Ġmusical": 10530, + "2009": 10531, + "Ġdrops": 10532, + "ocated": 10533, + "ateral": 10534, + "000000": 10535, + "Ġgre": 10536, + "Ġequality": 10537, + "Ġburden": 10538, + "Ġvig": 10539, + "ĠLeader": 10540, + "------------": 10541, + "Ġceremony": 10542, + "Ġfighter": 10543, + "Ġactors": 10544, + "Ġæ": 10545, + "aman": 10546, + "Fi": 10547, + "Ġalign": 10548, + "puter": 10549, + "Ġelder": 10550, + "ĠNSA": 10551, + 
"Ġrepresentation": 10552, + "ĠOntario": 10553, + "ITH": 10554, + "usalem": 10555, + "Ġharassment": 10556, + "itzer": 10557, + "Ġsymp": 10558, + "Ġboxes": 10559, + "ĠDR": 10560, + "Ġmanifest": 10561, + "atre": 10562, + "Ġ^": 10563, + "Ġdies": 10564, + "leton": 10565, + "Ġmissions": 10566, + "ethe": 10567, + "Ġresolve": 10568, + "Ġfollowers": 10569, + "Ġasc": 10570, + "Ġkm": 10571, + "lord": 10572, + "ammed": 10573, + "Ġsilent": 10574, + "ĠAssociated": 10575, + "Ġtiming": 10576, + "Ġprisoners": 10577, + "ĠKings": 10578, + "ĠFive": 10579, + "Ġtower": 10580, + "Ġapproaches": 10581, + "Ġprecisely": 10582, + "Ġbureau": 10583, + "ĠMother": 10584, + "ĠIss": 10585, + "Ġkeyboard": 10586, + "itual": 10587, + "Ġfunded": 10588, + "Ġstaying": 10589, + "Ġpsychological": 10590, + "Ġmile": 10591, + "ĠLeon": 10592, + "ĠBarb": 10593, + "will": 10594, + "Ġwider": 10595, + "ĠAtlantic": 10596, + "Ġtill": 10597, + "ĠRome": 10598, + "rot": 10599, + "Ġaccompan": 10600, + "Ġflour": 10601, + "aco": 10602, + "World": 10603, + "ĠExpress": 10604, + "ĠYu": 10605, + "Cor": 10606, + "Ġpleased": 10607, + "party": 10608, + "Ġpointing": 10609, + "Ġinflation": 10610, + "Ġroy": 10611, + "Ġ),": 10612, + "ainer": 10613, + "Ġwedding": 10614, + "ormon": 10615, + "Ġrequiring": 10616, + "Ġqualified": 10617, + "Ġsegment": 10618, + "END": 10619, + "Ġsizes": 10620, + "eals": 10621, + "Ġcorrupt": 10622, + "assador": 10623, + "Ġceleb": 10624, + "Ġdreams": 10625, + "ĠMess": 10626, + "Ġchecking": 10627, + "ĠVersion": 10628, + "Ġpreparing": 10629, + "Ġactively": 10630, + "ĠDiff": 10631, + "Ġlux": 10632, + "ĠWinter": 10633, + "acteria": 10634, + "ĠNE": 10635, + "Ġdeputy": 10636, + "Ġtransgender": 10637, + "Ġsummary": 10638, + "Ġinher": 10639, + "eries": 10640, + "char": 10641, + "ĠYan": 10642, + "Ġknock": 10643, + "ĠPath": 10644, + "Ġlip": 10645, + "roller": 10646, + "Ġimpression": 10647, + "Ġcelebrate": 10648, + "Ġslide": 10649, + "Ġguests": 10650, + "Ġclip": 10651, + "FS": 10652, + "Ġsavings": 10653, + "Ġcaptain": 10654, + "Ġlegacy": 10655, + "ĠDenver": 10656, + "Ġwounded": 10657, + "taboola": 10658, + "ACT": 10659, + "Ġpursue": 10660, + "Ġoxy": 10661, + "Ġq": 10662, + "Ġsemi": 10663, + "ĠNeed": 10664, + "ĠAffairs": 10665, + "Ġobsc": 10666, + "Ġchecked": 10667, + "Ġdual": 10668, + "Code": 10669, + "ĠMD": 10670, + "lem": 10671, + "ulty": 10672, + "Ġ©": 10673, + "ĠElizabeth": 10674, + "Ġcenturies": 10675, + "arded": 10676, + "src": 10677, + "Ġevident": 10678, + "ennis": 10679, + "atin": 10680, + "Ġunemployment": 10681, + "ĠMario": 10682, + "Ġintim": 10683, + "Christ": 10684, + "Ġbiological": 10685, + "Ġsoldier": 10686, + "ĠAdded": 10687, + "Ġmath": 10688, + "ĠGil": 10689, + "Ġbias": 10690, + "Ġdating": 10691, + "ĠOcean": 10692, + "Ġmice": 10693, + "Mus": 10694, + "hire": 10695, + "ĠTes": 10696, + "Server": 10697, + "limited": 10698, + "Size": 10699, + "Ġmeters": 10700, + "Ġrocket": 10701, + "essee": 10702, + "Ġcertificate": 10703, + "ĠIranian": 10704, + "ASS": 10705, + "Ġgrid": 10706, + "Dec": 10707, + "Ġrolling": 10708, + "commun": 10709, + "ĠSweden": 10710, + "bury": 10711, + "Ġtissue": 10712, + "Ġracism": 10713, + "ĠLocal": 10714, + "Ġmystery": 10715, + "Ġexamine": 10716, + "Ġstem": 10717, + "Ġsits": 10718, + "Ġhoped": 10719, + "oting": 10720, + "Ġdialogue": 10721, + "Ġpersu": 10722, + "Watch": 10723, + "lay": 10724, + "MAN": 10725, + "Ġchronic": 10726, + "ĠPortland": 10727, + "market": 10728, + "ĠSEC": 10729, + "Ġparallel": 10730, + "Ġscandal": 10731, + "Ġcarries": 10732, + "Ġphenomenon": 10733, + "human": 10734, + "acker": 10735, + 
"ĠOx": 10736, + "Ġretirement": 10737, + "tainment": 10738, + "ovie": 10739, + "ĠGear": 10740, + "Ġduties": 10741, + "Ġdose": 10742, + "Ġscroll": 10743, + "MB": 10744, + "inf": 10745, + "Ġsauce": 10746, + "Ġlandscape": 10747, + "reddit": 10748, + "ĠChampionship": 10749, + "ĠReddit": 10750, + "alid": 10751, + "Ġcoin": 10752, + "Ġovers": 10753, + "Ġposting": 10754, + "about": 10755, + "Ġfel": 10756, + "andy": 10757, + "Ġbold": 10758, + "Ġfocusing": 10759, + "effect": 10760, + "GR": 10761, + "Ġdeemed": 10762, + "Ġrecommendations": 10763, + "Ġstepped": 10764, + "Ġvoter": 10765, + "ĠDeep": 10766, + "ĠInstagram": 10767, + "Ġmoderate": 10768, + "ĠMaryland": 10769, + "Ġrestricted": 10770, + "ĠMB": 10771, + "ĠChall": 10772, + "Ġtob": 10773, + "Ġcir": 10774, + "ĠOcc": 10775, + "ĠEver": 10776, + "Ġcollaps": 10777, + "INFO": 10778, + "=-": 10779, + "ĠPict": 10780, + "ĠAccount": 10781, + "nc": 10782, + "Ġought": 10783, + "Ġexport": 10784, + "Ġdrunk": 10785, + "('": 10786, + "Ġwise": 10787, + "ĠMort": 10788, + "necess": 10789, + "Ġancest": 10790, + "ĠIncre": 10791, + "Ġfrequent": 10792, + "mir": 10793, + "Ġinterpretation": 10794, + "Ġdependent": 10795, + "Ġcoins": 10796, + "ĠBol": 10797, + "Video": 10798, + "ĠJustin": 10799, + "Ġfatal": 10800, + "Ġcooking": 10801, + "Ġconfusion": 10802, + "ipher": 10803, + "Ġcustody": 10804, + "ĠMorgan": 10805, + "omach": 10806, + "ĠGovernor": 10807, + "Ġrestaurants": 10808, + "eling": 10809, + "Ġacknowledged": 10810, + "Ġther": 10811, + "Ġgenes": 10812, + "ching": 10813, + "Hey": 10814, + "Ġtactics": 10815, + "ĠMexican": 10816, + "Ġvend": 10817, + "Ġhes": 10818, + "quer": 10819, + "Ġnoting": 10820, + "ĠCameron": 10821, + "Ġtargeting": 10822, + "rock": 10823, + "Ġcredits": 10824, + "Ġemotions": 10825, + "Ġrepresentatives": 10826, + "news": 10827, + "Ġlegislative": 10828, + "Ġremoving": 10829, + "Ġtweeted": 10830, + "ĠCarter": 10831, + "ĠFixed": 10832, + "Ġforcing": 10833, + "Ġspeaker": 10834, + "Ġmales": 10835, + "ĠVietnam": 10836, + "lined": 10837, + "Ġconcepts": 10838, + "Ġvoices": 10839, + "oir": 10840, + "ĠTrib": 10841, + "Whe": 10842, + "ĠJerusalem": 10843, + "ĠSant": 10844, + "Ġcul": 10845, + "Ġlady": 10846, + "ĠHawai": 10847, + "Ġarts": 10848, + "ĠInn": 10849, + "ĠMachine": 10850, + "ĠEmperor": 10851, + "Ġslot": 10852, + "gly": 10853, + "ĠProcess": 10854, + "III": 10855, + "Ġathletes": 10856, + "ĠTemple": 10857, + "ĠRepresent": 10858, + "Ġpresc": 10859, + "Ġtons": 10860, + "Ġgolden": 10861, + "Ġpunch": 10862, + "ĠGR": 10863, + "iverpool": 10864, + "Ġenact": 10865, + "Ġlobby": 10866, + "Ġmos": 10867, + "Ġpicking": 10868, + "Ġlifetime": 10869, + "Ġcognitive": 10870, + "Each": 10871, + "zo": 10872, + "Ġdub": 10873, + "Ġconsists": 10874, + "oln": 10875, + "Ġfestival": 10876, + "amous": 10877, + "Ġintellig": 10878, + "words": 10879, + "ĠSmart": 10880, + "Ġdele": 10881, + "Ġlapt": 10882, + "Ġmagical": 10883, + "ĠSin": 10884, + "bus": 10885, + "urities": 10886, + "ighth": 10887, + "ĠRuby": 10888, + "ĠSure": 10889, + "olving": 10890, + "Ġjun": 10891, + "OST": 10892, + "Ġimposed": 10893, + "Ġastron": 10894, + "Ġcorrel": 10895, + "ĠNS": 10896, + "ĠKit": 10897, + "ĠFuture": 10898, + "burn": 10899, + "Ġimmune": 10900, + "ocus": 10901, + "Ġcourses": 10902, + "ĠString": 10903, + "Ġlean": 10904, + "Ġghost": 10905, + "Ġoutcomes": 10906, + "Ġexpense": 10907, + "Ġeveryday": 10908, + "Ġacceptable": 10909, + "Ah": 10910, + "Ġequipped": 10911, + "Ġorange": 10912, + "FR": 10913, + "ĠDutch": 10914, + "Though": 10915, + "ĠRank": 10916, + "QU": 10917, + "ĠRoberts": 10918, + "what": 10919, 
+ "rend": 10920, + "Ġdisappear": 10921, + "Ġspawn": 10922, + "ĠLam": 10923, + "ois": 10924, + "Ġdeserve": 10925, + "Ġminimal": 10926, + "Ġnervous": 10927, + "ĠWould": 10928, + "Ġrook": 10929, + "ĠVancouver": 10930, + "Ġresign": 10931, + "shire": 10932, + "ĠWorks": 10933, + "ĠBuild": 10934, + "Ġaffordable": 10935, + "ĠGary": 10936, + "ĠArena": 10937, + "Ġhanging": 10938, + "Ġimplications": 10939, + "ĠSong": 10940, + "Ġmaintaining": 10941, + "Ġguards": 10942, + "CON": 10943, + "Ġderived": 10944, + "Ġexecuted": 10945, + "Ġtheories": 10946, + "Ġquoted": 10947, + "ĠAndre": 10948, + "oga": 10949, + "seless": 10950, + "info": 10951, + "ĠBelg": 10952, + "Ġtears": 10953, + "ĠSurv": 10954, + "Ġbirthday": 10955, + "igious": 10956, + "immer": 10957, + "Ġspectrum": 10958, + "Ġarchitecture": 10959, + "Ġrecruit": 10960, + "arma": 10961, + "Table": 10962, + "Ġmonsters": 10963, + "ĠGov": 10964, + "Ġdestination": 10965, + "Ġattractive": 10966, + "Ġfoss": 10967, + "ĠMoreover": 10968, + "Ġpresents": 10969, + "THE": 10970, + "Ġreply": 10971, + "pton": 10972, + "Ġcum": 10973, + "Ġdelight": 10974, + "Ġaffects": 10975, + "Ġdonations": 10976, + "ĠToy": 10977, + "ĠHim": 10978, + "MENT": 10979, + "Ġovercome": 10980, + "itched": 10981, + "ĠFantasy": 10982, + "ĠHat": 10983, + "ĠBeast": 10984, + "bott": 10985, + "Ġinvestigations": 10986, + "Run": 10987, + "Ġhunting": 10988, + "di": 10989, + "fund": 10990, + "Ġsessions": 10991, + "estyle": 10992, + "Ġportray": 10993, + "oids": 10994, + "Yeah": 10995, + "Ġcommunicate": 10996, + "Ġcomedy": 10997, + "ĠYang": 10998, + "Ġbelt": 10999, + "ĠMarine": 11000, + "Ġpredicted": 11001, + "Play": 11002, + "Ġimportantly": 11003, + "Ġremarkable": 11004, + "Ġeliminate": 11005, + "David": 11006, + "Ġbind": 11007, + "VID": 11008, + "Ġadvocates": 11009, + "ĠGaza": 11010, + "imp": 11011, + "DB": 11012, + "ĠNa": 11013, + "ĠSimilar": 11014, + "IES": 11015, + "Ġcharity": 11016, + "vas": 11017, + "math": 11018, + "Ġâĸ": 11019, + "oker": 11020, + "ndum": 11021, + "Ġcaps": 11022, + "ĠHal": 11023, + "2000": 11024, + "ean": 11025, + "Ġfleet": 11026, + "Ġrecre": 11027, + "Right": 11028, + "Ġsleeping": 11029, + "ijing": 11030, + "kind": 11031, + "Ġdesignated": 11032, + "ä": 11033, + "Ġanimation": 11034, + "kee": 11035, + "ĠIntrodu": 11036, + "Ġ/>": 11037, + "Ġdelayed": 11038, + "Ġtremend": 11039, + "Ġcurious": 11040, + "Use": 11041, + "Ġlect": 11042, + "dam": 11043, + "Ġinnovation": 11044, + "ĠPoints": 11045, + "Ġloading": 11046, + "Ġdispute": 11047, + "ctic": 11048, + "irds": 11049, + "ĠBY": 11050, + "Ġnurs": 11051, + "ĠValue": 11052, + "IONS": 11053, + "ĠHum": 11054, + "Ġtemplate": 11055, + "mers": 11056, + "Ġappearances": 11057, + "ĠEntertainment": 11058, + "Ġtranslation": 11059, + "Ġsake": 11060, + "Ġbeneath": 11061, + "Ġinhib": 11062, + "Ġeuro": 11063, + "abetes": 11064, + "Ġstudying": 11065, + "ĠMas": 11066, + "Ġperceived": 11067, + "Ġexamined": 11068, + "Ġeager": 11069, + "Ġcoaches": 11070, + "Ġimper": 11071, + "chi": 11072, + "Ġproduces": 11073, + "\").": 11074, + "ĠEveryone": 11075, + "Ġmunicip": 11076, + "Ġgirlfriend": 11077, + "Ġhire": 11078, + "ĠVice": 11079, + "Ġsuitable": 11080, + "opy": 11081, + "Ġinequ": 11082, + "ĠDuke": 11083, + "fish": 11084, + "first": 11085, + "ĠObs": 11086, + "Ġinterior": 11087, + "ĠBruce": 11088, + "ĠRy": 11089, + "Ġanalys": 11090, + "Ġconsiderable": 11091, + "Ġforecast": 11092, + "Ġfert": 11093, + "orship": 11094, + "ĠDrug": 11095, + "ĠALL": 11096, + ":\"": 11097, + "thur": 11098, + "ĠMail": 11099, + "Ġballot": 11100, + "Ġinstantly": 11101, + "ĠChannel": 11102, 
+ "Ġpicks": 11103, + "Ġ1989": 11104, + "Ġtent": 11105, + "oli": 11106, + "Ġcivilian": 11107, + "bling": 11108, + "ello": 11109, + "bu": 11110, + "Ġinch": 11111, + "Ġlogo": 11112, + "Ġcooperation": 11113, + "Ġwalks": 11114, + "Ġinvestments": 11115, + "Ġimprison": 11116, + "ĠFestival": 11117, + "ĠKy": 11118, + "Ġlegally": 11119, + "Ġgri": 11120, + "charg": 11121, + "Sl": 11122, + "Ġthreatening": 11123, + "duction": 11124, + "flow": 11125, + "Ġdismissed": 11126, + "ibraries": 11127, + "cap": 11128, + "ele": 11129, + "ĠMcG": 11130, + "ĠHarvard": 11131, + "ĠConservative": 11132, + "ĠCBS": 11133, + "png": 11134, + "Ġroots": 11135, + "ĠHaving": 11136, + "umbled": 11137, + "ĠFun": 11138, + "\\/": 11139, + "ĠSearch": 11140, + "plex": 11141, + "Ġdiscussing": 11142, + "Ġcontinu": 11143, + "ĠTai": 11144, + "ĠWik": 11145, + "Free": 11146, + "fit": 11147, + "Ġrefuse": 11148, + "Ġmanaging": 11149, + "Ġsynd": 11150, + "ipedia": 11151, + "walk": 11152, + "Ġprofessionals": 11153, + "Ġguidance": 11154, + "Ġuniversities": 11155, + "Ġassemb": 11156, + "untu": 11157, + "Finally": 11158, + "ASE": 11159, + "ĠAuto": 11160, + "ĠHad": 11161, + "Ġanniversary": 11162, + "LD": 11163, + "ĠDur": 11164, + "ĠUltimate": 11165, + "ihad": 11166, + "product": 11167, + "Ġtransit": 11168, + "Ġrestore": 11169, + "Ġexplaining": 11170, + "Ġasset": 11171, + "Ġtransferred": 11172, + "Ġburst": 11173, + "apolis": 11174, + "ĠMagazine": 11175, + "ĠCra": 11176, + "ĠBR": 11177, + "gged": 11178, + "ĠHE": 11179, + "Mich": 11180, + "bet": 11181, + "ĠLady": 11182, + "ylum": 11183, + "erves": 11184, + "Ġmeets": 11185, + "white": 11186, + "Log": 11187, + "Ġcorresponding": 11188, + "Ġinsisted": 11189, + "GG": 11190, + "Ġsurrounded": 11191, + "Ġtens": 11192, + "Ġlane": 11193, + "Ġcoinc": 11194, + "home": 11195, + "Ġexisted": 11196, + "ected": 11197, + "ĠDouble": 11198, + "lamm": 11199, + "Ġskept": 11200, + "exp": 11201, + "Ġperception": 11202, + "iev": 11203, + "ĠBeing": 11204, + "oft": 11205, + "Ġadopt": 11206, + ".:": 11207, + "];": 11208, + "Windows": 11209, + "Ġsatellite": 11210, + "ASH": 11211, + "Ġinfant": 11212, + "description": 11213, + "ĠMeanwhile": 11214, + "cm": 11215, + "oca": 11216, + "ĠTreat": 11217, + "actor": 11218, + "Ġtobacco": 11219, + "ĠNorm": 11220, + "emption": 11221, + "Ġflesh": 11222, + "Ġje": 11223, + "oop": 11224, + "ĠHeaven": 11225, + "Ġbeating": 11226, + "anim": 11227, + "Ġgathering": 11228, + "Ġcultiv": 11229, + "GO": 11230, + "abe": 11231, + "ĠJonathan": 11232, + "ĠSafety": 11233, + "Ġbadly": 11234, + "prot": 11235, + "Ġchoosing": 11236, + "Ġcontacted": 11237, + "Ġquit": 11238, + "Ġdistur": 11239, + "Ġstir": 11240, + "Ġtoken": 11241, + "Det": 11242, + "ĠPa": 11243, + "Ġfunctionality": 11244, + "003": 11245, + "some": 11246, + "Ġlimitations": 11247, + "Ġmeth": 11248, + "build": 11249, + "config": 11250, + "NT": 11251, + "rell": 11252, + "blem": 11253, + "ĠMom": 11254, + "Ġveterans": 11255, + "ĠHu": 11256, + "Ġtrends": 11257, + "arer": 11258, + "ĠGiven": 11259, + "ĠCaption": 11260, + "may": 11261, + "AST": 11262, + "Ġwondering": 11263, + "ĠClark": 11264, + "normal": 11265, + "Ġseparated": 11266, + "Ġdesp": 11267, + "stic": 11268, + "brew": 11269, + "Ġrelating": 11270, + "ĠNik": 11271, + "ĠFarm": 11272, + "Ġenthusi": 11273, + "good": 11274, + "deb": 11275, + "Ġactivist": 11276, + "Ġmart": 11277, + "Ġexplosion": 11278, + "ĠEconomic": 11279, + "Link": 11280, + "Ġinsight": 11281, + "Ġconvenient": 11282, + "Ġcounterpart": 11283, + "support": 11284, + "ĠVirt": 11285, + "agen": 11286, + "ĠTennessee": 11287, + "ĠSimon": 11288, 
+ "ĠAward": 11289, + "OCK": 11290, + "ĠFigure": 11291, + "Ġoverseas": 11292, + "Ġpride": 11293, + "ĠCas": 11294, + "note": 11295, + "mg": 11296, + "Current": 11297, + "Ġdisplays": 11298, + "content": 11299, + "Ġtraveling": 11300, + "Ġhospitals": 11301, + "ĠFinancial": 11302, + "ĠPast": 11303, + "Ġdefendant": 11304, + "Ġstreaming": 11305, + "mble": 11306, + "ĠBerlin": 11307, + "uki": 11308, + "Ġdistribut": 11309, + "Ġantib": 11310, + "Ġchocolate": 11311, + "ĠCastle": 11312, + "Ġinterrupt": 11313, + "ĠRow": 11314, + "Ġconversion": 11315, + "Ġbugs": 11316, + "ĠRather": 11317, + "liest": 11318, + "LY": 11319, + "ĠJean": 11320, + "common": 11321, + "akh": 11322, + "Ġ130": 11323, + "otton": 11324, + "ĠDean": 11325, + "Ġamendment": 11326, + "Ġgameplay": 11327, + "ĠWarren": 11328, + "oda": 11329, + "Ġhighlights": 11330, + "Ġirre": 11331, + "ĠNATO": 11332, + "Ġballs": 11333, + "Ġdemanding": 11334, + "URE": 11335, + "ĠLuke": 11336, + "Figure": 11337, + "stop": 11338, + "onia": 11339, + "zone": 11340, + "izers": 11341, + "ĠWR": 11342, + "Ġawarded": 11343, + "Ġregulatory": 11344, + "ĠHart": 11345, + "ĠSN": 11346, + "pling": 11347, + "Ġsour": 11348, + "ĠPixel": 11349, + "usive": 11350, + "Ġfet": 11351, + "ĠSent": 11352, + "Ġautomatic": 11353, + "Ġfer": 11354, + "vernment": 11355, + "ĠKhan": 11356, + "TON": 11357, + "father": 11358, + "Ġextraordinary": 11359, + "throp": 11360, + "ĠPython": 11361, + "ĠGPU": 11362, + "Ġsexually": 11363, + "Ġdesktop": 11364, + "itivity": 11365, + "ĠAntonio": 11366, + "Ġorient": 11367, + "Ġears": 11368, + "obby": 11369, + "ouses": 11370, + "vertisements": 11371, + "Ġmanufacturers": 11372, + "icient": 11373, + "minute": 11374, + "Ġconviction": 11375, + "Ġgarden": 11376, + "public": 11377, + "Ġsatisfied": 11378, + "fold": 11379, + "OK": 11380, + "Ġinhab": 11381, + "ĠThink": 11382, + "Ġprogramme": 11383, + "Ġstomach": 11384, + "Ġcoordin": 11385, + "Ġholy": 11386, + "Ġthreshold": 11387, + "Ġrhet": 11388, + "Ġserial": 11389, + "Ġemployers": 11390, + "ĠEverything": 11391, + "rah": 11392, + "Ġbother": 11393, + "Ġbrands": 11394, + "Value": 11395, + "ĠTed": 11396, + "ĠPlanet": 11397, + "Ġpink": 11398, + "ĠFurthermore": 11399, + "sa": 11400, + "PE": 11401, + "reck": 11402, + "ĠUSD": 11403, + "otte": 11404, + "Ġ&&": 11405, + "Ġlanded": 11406, + "gets": 11407, + "Ġproducers": 11408, + "Ġhealthcare": 11409, + "Ġdominant": 11410, + "Ġdestro": 11411, + "Ġamended": 11412, + "chron": 11413, + "Ġfits": 11414, + "ĠSyd": 11415, + "ĠAuthority": 11416, + "ATCH": 11417, + "Ġfights": 11418, + "ĠLLC": 11419, + "Ġ---": 11420, + "ĠCorp": 11421, + "Ġtoxic": 11422, + "specific": 11423, + "ĠCorn": 11424, + "ĠChel": 11425, + "Ġtelephone": 11426, + "ĠPant": 11427, + "Ġmysterious": 11428, + "aunch": 11429, + "odox": 11430, + "media": 11431, + "Ġwitnesses": 11432, + "agu": 11433, + "Ġquestioned": 11434, + "ĠBrexit": 11435, + "ĠRemember": 11436, + "enez": 11437, + "Ġendorse": 11438, + "iatric": 11439, + "ĠIdent": 11440, + "Ġridiculous": 11441, + "110": 11442, + "Ġprayer": 11443, + "Ġscientist": 11444, + "Ġ1950": 11445, + "ĠAqu": 11446, + "Ġunderground": 11447, + "ĠUFC": 11448, + "mare": 11449, + "ĠLater": 11450, + "wich": 11451, + "Ġsubscrib": 11452, + "Ġhosts": 11453, + "Ġerr": 11454, + "Ġgrants": 11455, + "antom": 11456, + "Ġsummon": 11457, + "early": 11458, + "ĠClear": 11459, + "ĠPrim": 11460, + "Ġsuspension": 11461, + "Ġguaranteed": 11462, + "apper": 11463, + "Ġrice": 11464, + "ĠSean": 11465, + "ĠShin": 11466, + "Ġreferendum": 11467, + "Ġfled": 11468, + "rust": 11469, + "Ġ360": 11470, + "tery": 11471, + 
"Ġshocked": 11472, + "BR": 11473, + "ĠOil": 11474, + "ĠAllah": 11475, + "Ġpartly": 11476, + "Ġignor": 11477, + "Ġtransmission": 11478, + "Ġhomosexual": 11479, + "iversal": 11480, + "Ġhopefully": 11481, + "ãĤ¤": 11482, + "Ġlesson": 11483, + "Leg": 11484, + "Ġ..": 11485, + "Yet": 11486, + "table": 11487, + "appropri": 11488, + "rett": 11489, + "Ġboards": 11490, + "Ġincorrect": 11491, + "Ġbacteria": 11492, + "aru": 11493, + "amac": 11494, + "Ġsnap": 11495, + ".'\"": 11496, + "Ġparad": 11497, + "tem": 11498, + "heart": 11499, + "Ġavailability": 11500, + "Ġwisdom": 11501, + "Ġ(+": 11502, + "Ġpriest": 11503, + "ĠÂłĠÂł": 11504, + "Open": 11505, + "Ġspan": 11506, + "Ġparameter": 11507, + "Ġconvince": 11508, + "Ġ(%)": 11509, + "rac": 11510, + "Ġfo": 11511, + "Ġsafely": 11512, + "Ġconverted": 11513, + "ĠOlympic": 11514, + "Ġreserve": 11515, + "Ġhealing": 11516, + "ĠMine": 11517, + "Max": 11518, + "Ġinherent": 11519, + "ĠGraham": 11520, + "Ġintegrated": 11521, + "Dem": 11522, + "Ġpipeline": 11523, + "Ġapplying": 11524, + "Ġembed": 11525, + "ĠCharlie": 11526, + "Ġcave": 11527, + "2008": 11528, + "Ġconsensus": 11529, + "Ġrewards": 11530, + "Pal": 11531, + "ĠHTML": 11532, + "Ġpopularity": 11533, + "looking": 11534, + "ĠSword": 11535, + "ĠArts": 11536, + "')": 11537, + "Ġelectron": 11538, + "clusions": 11539, + "Ġintegrity": 11540, + "Ġexclusively": 11541, + "Ġgrace": 11542, + "Ġtorture": 11543, + "Ġburned": 11544, + "two": 11545, + "Ġ180": 11546, + "Produ": 11547, + "Ġentreprene": 11548, + "raphics": 11549, + "Ġgym": 11550, + "ricane": 11551, + "ĠTam": 11552, + "Ġadministrative": 11553, + "Ġmanufacturer": 11554, + "Ġvel": 11555, + "ĠNi": 11556, + "Ġisolated": 11557, + "ĠMedicine": 11558, + "Ġbackup": 11559, + "Ġpromoting": 11560, + "Ġcommander": 11561, + "Ġflee": 11562, + "ĠRussell": 11563, + "Ġforgotten": 11564, + "ĠMissouri": 11565, + "Ġresidence": 11566, + "mons": 11567, + "Ġresemb": 11568, + "Ġwand": 11569, + "Ġmeaningful": 11570, + "PT": 11571, + "Ġbol": 11572, + "Ġhelic": 11573, + "Ġwealthy": 11574, + "Ġrifle": 11575, + "strong": 11576, + "rowing": 11577, + "plan": 11578, + "asury": 11579, + "âĢ¦.": 11580, + "Ġexpanding": 11581, + "ĠHamilton": 11582, + "Ġreceives": 11583, + "SI": 11584, + "eatures": 11585, + "ĠAnim": 11586, + "REE": 11587, + "Put": 11588, + "Ġbriefly": 11589, + "rive": 11590, + "Ġstimul": 11591, + "Ġ``(": 11592, + "Ġ__": 11593, + "Ġchip": 11594, + "Ġhaz": 11595, + "Ġprize": 11596, + "ĠThings": 11597, + "ACE": 11598, + "ulin": 11599, + "dict": 11600, + "oku": 11601, + "Ġassociate": 11602, + "ockets": 11603, + "youtube": 11604, + "Story": 11605, + "ategory": 11606, + "Ġmild": 11607, + "ailing": 11608, + "ĠYe": 11609, + "Orig": 11610, + "ĠKa": 11611, + "orig": 11612, + "Ġpropaganda": 11613, + "Ġanonymous": 11614, + "Ġstruggled": 11615, + "Ġoutrage": 11616, + "ATED": 11617, + "ĠBeijing": 11618, + "rary": 11619, + "Ġleather": 11620, + "Ġworlds": 11621, + "Ġbroader": 11622, + "125": 11623, + "idal": 11624, + "ĠBetter": 11625, + "Ġtear": 11626, + "Ext": 11627, + "Ġproposals": 11628, + "Ġiter": 11629, + "ĠSquad": 11630, + "Ġvolunt": 11631, + "mi": 11632, + "Did": 11633, + "ĠPu": 11634, + "pin": 11635, + "Ġspeakers": 11636, + "Ġborders": 11637, + "Ġfigured": 11638, + "='": 11639, + "Ġsimultaneously": 11640, + "aeda": 11641, + "Ġcharging": 11642, + "Ġurged": 11643, + "Ġconj": 11644, + "256": 11645, + "ĠGordon": 11646, + "merce": 11647, + "Ġdocumentary": 11648, + "Share": 11649, + "itol": 11650, + "ONE": 11651, + "ĠGarden": 11652, + "hatt": 11653, + "ĠThompson": 11654, + "aneous": 11655, + 
"apore": 11656, + "Ġtanks": 11657, + "Ġlessons": 11658, + "track": 11659, + "Ġoutstanding": 11660, + "Ġvolunteers": 11661, + "Ġspray": 11662, + "Ġmanagers": 11663, + "large": 11664, + "Ġcamps": 11665, + "Ġartificial": 11666, + "ĠRu": 11667, + "Ġbags": 11668, + "thal": 11669, + "Ġcompatible": 11670, + "ĠBlade": 11671, + "Ġfed": 11672, + "Ġargues": 11673, + "FI": 11674, + "Ġunfair": 11675, + "Ġcorn": 11676, + "Ġoffset": 11677, + "Ġdirections": 11678, + "Ġdisappointed": 11679, + "ĠConvention": 11680, + "Ġviewing": 11681, + "ME": 11682, + "ocity": 11683, + "Ġtowns": 11684, + "Ġlayers": 11685, + "Ġrolled": 11686, + "Ġjumped": 11687, + "Ġattribute": 11688, + "Ġunnecess": 11689, + "incoln": 11690, + "Ġsuppose": 11691, + "ĠNether": 11692, + "cha": 11693, + "Ġburied": 11694, + "Ġsixth": 11695, + "Ben": 11696, + "ressing": 11697, + "OUR": 11698, + "Ġwound": 11699, + "Ġcycl": 11700, + "Ġmechanisms": 11701, + "Ġcongressional": 11702, + "ĠElement": 11703, + "Ġagreements": 11704, + "Ġdecor": 11705, + "Ġclosest": 11706, + "ĠMit": 11707, + "Google": 11708, + "}}": 11709, + "Ġmixture": 11710, + "Ġfluid": 11711, + "Sign": 11712, + "ĠScholar": 11713, + "Ġpist": 11714, + "asket": 11715, + "abling": 11716, + "Ġracing": 11717, + "hero": 11718, + "riel": 11719, + "assy": 11720, + "Ġcheaper": 11721, + "ben": 11722, + "Ġvertical": 11723, + "amacare": 11724, + "ĠReading": 11725, + "gments": 11726, + "Ġhelicop": 11727, + "Ġsacrifice": 11728, + "aya": 11729, + "paren": 11730, + "VA": 11731, + "ĠLes": 11732, + "ĠStudio": 11733, + "Ġviolations": 11734, + "ĠAnna": 11735, + "acer": 11736, + "é¾": 11737, + "ĠRat": 11738, + "ĠBeck": 11739, + "ĠDick": 11740, + "ĠACT": 11741, + "Ġcomposition": 11742, + "Ġtexture": 11743, + "ĠOwn": 11744, + "Ġsmartphone": 11745, + "ĠNA": 11746, + "Ġforb": 11747, + "import": 11748, + "Ġdefending": 11749, + "ilst": 11750, + "rer": 11751, + "Ġoh": 11752, + "ĠJeremy": 11753, + "Ġbanking": 11754, + "ceptions": 11755, + "Ġrespective": 11756, + "/.": 11757, + "Ġdrinks": 11758, + "ĠWi": 11759, + "Ġbands": 11760, + "ĠLiverpool": 11761, + "Ġgrip": 11762, + "ĠBuy": 11763, + "Ġopenly": 11764, + "Ġreviewed": 11765, + "pert": 11766, + "Ġverify": 11767, + "ĠCole": 11768, + "ĠWales": 11769, + "MO": 11770, + "Ġunpre": 11771, + "Ġshelter": 11772, + "ĠImperial": 11773, + "Ġgui": 11774, + "ĠDak": 11775, + "Ġsuggestions": 11776, + "Ġexplicitly": 11777, + "Ġslave": 11778, + "Ġblockchain": 11779, + "Ġcompeting": 11780, + "Ġpromising": 11781, + "SON": 11782, + "Ġsoccer": 11783, + "Ġconstitution": 11784, + "429": 11785, + "Ġdistract": 11786, + "ĠUser": 11787, + "esides": 11788, + "ĠMethod": 11789, + "ĠTokyo": 11790, + "Ġaccompanied": 11791, + "Client": 11792, + "sur": 11793, + "alog": 11794, + "Ġidentification": 11795, + "Ġinvasion": 11796, + "asma": 11797, + "Ġindustries": 11798, + "ppers": 11799, + "Ġsubtle": 11800, + "ĠUnit": 11801, + "natural": 11802, + "Ġsurvived": 11803, + "Ġflaw": 11804, + "ĺħ": 11805, + "ĠHoll": 11806, + "Ġdeficit": 11807, + "Ġtutorial": 11808, + "ĠChance": 11809, + "Ġarguing": 11810, + "Ġcontemporary": 11811, + "Ġintegration": 11812, + "forward": 11813, + "Ġtum": 11814, + "itis": 11815, + "Ġhiding": 11816, + "ĠDomin": 11817, + "ĠTan": 11818, + "ĠBuilding": 11819, + "ĠVin": 11820, + "Ġspokesperson": 11821, + "ĠNotes": 11822, + "Ġemerging": 11823, + "Ġpreparation": 11824, + "Ġprost": 11825, + "Ġsuspects": 11826, + "Ġautonom": 11827, + "Description": 11828, + "Ġdealt": 11829, + "ĠPear": 11830, + "Ġsteady": 11831, + "Ġdecreased": 11832, + "Ġsovere": 11833, + "ĠClin": 11834, + "Ġgradually": 
11835, + "orses": 11836, + "ĠWAR": 11837, + "Serv": 11838, + "ãĤ¢": 11839, + "hr": 11840, + "Ġdirty": 11841, + "ĠBarn": 11842, + "ĠBC": 11843, + "Ġdil": 11844, + "Ġcalendar": 11845, + "Ġcompliance": 11846, + "Ġchamber": 11847, + "bb": 11848, + "Ġpassenger": 11849, + "ateful": 11850, + "ĠTitle": 11851, + "ĠSydney": 11852, + "ĠGot": 11853, + "Ġdarkness": 11854, + "Ġdefect": 11855, + "Ġpacked": 11856, + "assion": 11857, + "Ġgods": 11858, + "Ġharsh": 11859, + "ICK": 11860, + "leans": 11861, + "Ġalgorithm": 11862, + "Ġoxygen": 11863, + "Ġvisits": 11864, + "Ġblade": 11865, + "Ġkilomet": 11866, + "ĠKentucky": 11867, + "Ġkiller": 11868, + "Pack": 11869, + "enny": 11870, + "Ġdivine": 11871, + "Ġnomination": 11872, + "being": 11873, + "Ġengines": 11874, + "Ġcats": 11875, + "Ġbuffer": 11876, + "ĠPhill": 11877, + "Ġtraff": 11878, + "AGE": 11879, + "Ġtongue": 11880, + "Ġradiation": 11881, + "erer": 11882, + "mem": 11883, + "ĠExplicit": 11884, + "é¾į": 11885, + "Ġcouples": 11886, + "Ġphysics": 11887, + "ĠMcK": 11888, + "Ġpolitically": 11889, + "awks": 11890, + "ĠBloom": 11891, + "Ġworship": 11892, + "eger": 11893, + "uter": 11894, + "ĠFO": 11895, + "Ġmathemat": 11896, + "Ġsentenced": 11897, + "Ġdisk": 11898, + "ĠMarg": 11899, + "Ġ/*": 11900, + "PI": 11901, + "Ġoptional": 11902, + "Ġbabies": 11903, + "Ġseeds": 11904, + "ĠScottish": 11905, + "Ġthy": 11906, + "]]": 11907, + "ĠHitler": 11908, + "PH": 11909, + "ngth": 11910, + "Ġrecovered": 11911, + "inge": 11912, + "Ġpowder": 11913, + "Ġlips": 11914, + "Ġdesigner": 11915, + "Ġdisorders": 11916, + "Ġcourage": 11917, + "Ġchaos": 11918, + "\"},{\"": 11919, + "Ġcarrier": 11920, + "bably": 11921, + "High": 11922, + "ĠRT": 11923, + "esity": 11924, + "len": 11925, + "Ġroutes": 11926, + "uating": 11927, + "Fil": 11928, + "NOT": 11929, + "wall": 11930, + "sburgh": 11931, + "Ġengaging": 11932, + "ĠJavaScript": 11933, + "orer": 11934, + "lihood": 11935, + "Ġunions": 11936, + "ĠFederation": 11937, + "ĠTesla": 11938, + "Ġcompletion": 11939, + "ĠTa": 11940, + "Ġprivilege": 11941, + "ĠOrange": 11942, + "Ġneur": 11943, + "parency": 11944, + "Ġbones": 11945, + "Ġtitled": 11946, + "Ġprosecutors": 11947, + "ĠME": 11948, + "Ġengineer": 11949, + "ĠUniverse": 11950, + "ĠHig": 11951, + "nie": 11952, + "oard": 11953, + "Ġhearts": 11954, + "ĠGre": 11955, + "ussion": 11956, + "Ġministry": 11957, + "Ġpenet": 11958, + "ĠNut": 11959, + "ĠOw": 11960, + "ĠXP": 11961, + "instein": 11962, + "Ġbulk": 11963, + "System": 11964, + "icism": 11965, + "ĠMarketable": 11966, + "Ġpreval": 11967, + "Ġposter": 11968, + "Ġattending": 11969, + "urable": 11970, + "Ġlicensed": 11971, + "ĠGh": 11972, + "etry": 11973, + "ĠTradable": 11974, + "Ġblast": 11975, + "à¤": 11976, + "ĠTitan": 11977, + "elled": 11978, + "die": 11979, + "Have": 11980, + "ĠFlame": 11981, + "Ġprofound": 11982, + "Ġparticipating": 11983, + "Ġanime": 11984, + "ĠEss": 11985, + "Ġspecify": 11986, + "Ġregarded": 11987, + "ĠSpell": 11988, + "Ġsons": 11989, + "owned": 11990, + "Ġmerc": 11991, + "Ġexperimental": 11992, + "lando": 11993, + "hs": 11994, + "ĠDungeon": 11995, + "inos": 11996, + "Ġcomply": 11997, + "ĠSystems": 11998, + "arth": 11999, + "Ġseized": 12000, + "local": 12001, + "ĠGirls": 12002, + "udo": 12003, + "oned": 12004, + "ĠFle": 12005, + "Ġconstructed": 12006, + "Ġhosted": 12007, + "Ġscared": 12008, + "actic": 12009, + "ĠIslands": 12010, + "ĠMORE": 12011, + "Ġbless": 12012, + "Ġblocking": 12013, + "Ġchips": 12014, + "Ġevac": 12015, + "Ps": 12016, + "Ġcorporation": 12017, + "Ġox": 12018, + "Ġlighting": 12019, + "Ġneighbors": 
12020, + "ĠUb": 12021, + "aro": 12022, + "Ġbeef": 12023, + "ĠUber": 12024, + "Facebook": 12025, + "armed": 12026, + "itate": 12027, + "ĠRating": 12028, + "ĠQuick": 12029, + "Ġoccupied": 12030, + "Ġaims": 12031, + "ĠAdditionally": 12032, + "ĠInterest": 12033, + "Ġdramatically": 12034, + "Ġheal": 12035, + "Ġpainting": 12036, + "Ġengineers": 12037, + "MM": 12038, + "ĠMust": 12039, + "Ġquantity": 12040, + "Paul": 12041, + "Ġearnings": 12042, + "ĠPosts": 12043, + "stra": 12044, + "ãĥ¼ãĥ": 12045, + "Ġstance": 12046, + "Ġdropping": 12047, + "script": 12048, + "Ġdressed": 12049, + "Make": 12050, + "Ġjustify": 12051, + "ĠLtd": 12052, + "Ġprompted": 12053, + "Ġscrut": 12054, + "Ġspeeds": 12055, + "ĠGiants": 12056, + "omer": 12057, + "ĠEditor": 12058, + "Ġdescribing": 12059, + "ĠLie": 12060, + "mented": 12061, + "Ġnowhere": 12062, + "ocaly": 12063, + "Ġinstruction": 12064, + "fortable": 12065, + "Ġentities": 12066, + "Ġcm": 12067, + "ĠNatural": 12068, + "Ġinquiry": 12069, + "Ġpressed": 12070, + "izont": 12071, + "forced": 12072, + "Ġraises": 12073, + "ĠNetflix": 12074, + "ĠSide": 12075, + "Ġouter": 12076, + "Ġamongst": 12077, + "ims": 12078, + "owski": 12079, + "Ġclimb": 12080, + "never": 12081, + "Ġcombine": 12082, + "ding": 12083, + "Ġcompr": 12084, + "Ġsignificance": 12085, + "Ġremembered": 12086, + "ĠNevada": 12087, + "ĠTel": 12088, + "ĠScar": 12089, + "ĠWarriors": 12090, + "ĠJane": 12091, + "Ġcoup": 12092, + "bas": 12093, + "Ġterminal": 12094, + ",-": 12095, + "OH": 12096, + "Ġtension": 12097, + "Ġwings": 12098, + "ĠMyster": 12099, + "����": 12100, + "ĠUnlike": 12101, + "valid": 12102, + "vironments": 12103, + "ĠAli": 12104, + "Ġnaked": 12105, + "books": 12106, + "ĠMun": 12107, + "ĠGulf": 12108, + "Ġdensity": 12109, + "Ġdimin": 12110, + "Ġdesperate": 12111, + "Ġpresidency": 12112, + "Ġ1986": 12113, + "hy": 12114, + "IND": 12115, + "Ġunlock": 12116, + "imens": 12117, + "Ġhandled": 12118, + "ĠEb": 12119, + "Ġdisappeared": 12120, + "Ġgenre": 12121, + "Ġ1988": 12122, + "Ġdetermination": 12123, + "Stream": 12124, + "iko": 12125, + "apters": 12126, + "Ġacknowledge": 12127, + "Jan": 12128, + "Ġcapitalism": 12129, + "Pat": 12130, + "Ġ2020": 12131, + "Ġpainful": 12132, + "Ġcurve": 12133, + "Ġbombs": 12134, + "storm": 12135, + "ĠMetal": 12136, + "encer": 12137, + "ĠFig": 12138, + "ĠAaron": 12139, + "anches": 12140, + "Ġinspiration": 12141, + "Ġexhaust": 12142, + "tains": 12143, + "ashi": 12144, + "Ġdescript": 12145, + "Ġritual": 12146, + "ĠChelsea": 12147, + "Ġpromotion": 12148, + "ĠHung": 12149, + "ĠWard": 12150, + "iva": 12151, + "ĠET": 12152, + "Ġtoss": 12153, + "allow": 12154, + "ĠFrancis": 12155, + "Dep": 12156, + "Ġhappiness": 12157, + "ĠGlass": 12158, + "Ġbeta": 12159, + "Ġstrengthen": 12160, + "NE": 12161, + "oa": 12162, + "Ġbuttons": 12163, + "ĠMurray": 12164, + "Ġkicked": 12165, + "Quest": 12166, + "ĠTalk": 12167, + "ĠSeveral": 12168, + "ĠZero": 12169, + "Ġdrone": 12170, + "ulk": 12171, + "Ġcam": 12172, + "ĠMobile": 12173, + "Ġpreventing": 12174, + "Ġretro": 12175, + "ĠAx": 12176, + "Ġcruel": 12177, + "Ġfloat": 12178, + ".),": 12179, + "Ġfiling": 12180, + "ĠGrant": 12181, + "ĠBor": 12182, + "Ġrib": 12183, + "Ġchampionship": 12184, + "ĠMerc": 12185, + "Ġstyles": 12186, + "Ġcake": 12187, + "Ġbuilds": 12188, + "ĠSelf": 12189, + "iox": 12190, + "Ġepic": 12191, + "oyd": 12192, + "Bel": 12193, + "ĠStew": 12194, + ".(": 12195, + "ahu": 12196, + "ĠBeyond": 12197, + "Ġouts": 12198, + "Ġsolo": 12199, + "ĠTree": 12200, + "Ġpreserve": 12201, + "Ġtub": 12202, + "ARE": 12203, + "roc": 12204, + "ĠImpro": 
12205, + "ĠWright": 12206, + "Ġbund": 12207, + "Ġtraged": 12208, + "Ġoccasional": 12209, + "bian": 12210, + "Second": 12211, + "rons": 12212, + "Ġinteractions": 12213, + "formed": 12214, + "sing": 12215, + "Ġowns": 12216, + "Ġhockey": 12217, + "General": 12218, + "Ġlogical": 12219, + "Ġexpend": 12220, + "Ġescal": 12221, + "ĠGriff": 12222, + "ĠCrown": 12223, + "ĠReserve": 12224, + "Ġstopping": 12225, + "Ġexcuse": 12226, + "second": 12227, + "Ġoperated": 12228, + "Ġreaches": 12229, + "ĠMalays": 12230, + "Ġpollution": 12231, + "ĠBrooklyn": 12232, + "Ġdelete": 12233, + "Ġhash": 12234, + "Block": 12235, + "aha": 12236, + "âĢ³": 12237, + "Ġshorter": 12238, + "piece": 12239, + ">>>": 13163, + "ĠMormon": 13164, + "tor": 13165, + "Ġparticles": 13166, + "ĠBart": 13167, + "ryption": 13168, + "Ġadmin": 13169, + "Ġsquee": 13170, + "VIDIA": 13171, + "Ġcreator": 13172, + "iameter": 13173, + "icular": 13174, + "NBC": 13175, + "Ġgrabbed": 13176, + "Ġnodd": 13177, + "Ġrated": 13178, + "Ġrotation": 13179, + "Ġgrasp": 13180, + "Ġexcessive": 13181, + "ĠEC": 13182, + "ĠWhit": 13183, + "Ġinventory": 13184, + "aults": 13185, + "ĠFB": 13186, + "Ġecosystem": 13187, + "Ġbillions": 13188, + "Ġventure": 13189, + "named": 13190, + "Ġdefender": 13191, + "oute": 13192, + "Instead": 13193, + "irable": 13194, + "War": 13195, + "Ġassumption": 13196, + "Ġbite": 13197, + "Ġearthqu": 13198, + "tail": 13199, + "space": 13200, + "Ġgifts": 13201, + "boys": 13202, + "Ġinevitable": 13203, + "Ġstructural": 13204, + "Ġbeneficial": 13205, + "Ġcompelling": 13206, + "hole": 13207, + "ervation": 13208, + "Ġcoat": 13209, + "oj": 13210, + "incarn": 13211, + "ĠYears": 13212, + "Ġdetermining": 13213, + "Ġrhetoric": 13214, + "Ġboundaries": 13215, + "Ġwhites": 13216, + "Ant": 13217, + "addy": 13218, + ")-": 13219, + "raham": 13220, + "etermin": 13221, + "Ġharvest": 13222, + "ĠConc": 13223, + "Ġlaptop": 13224, + "ĠMatch": 13225, + "Ġenjoying": 13226, + "cca": 13227, + "ollar": 13228, + "Ġtrips": 13229, + "Ġaddiction": 13230, + "ĠSak": 13231, + "Ġpowered": 13232, + "Ġcous": 13233, + "ĠRussians": 13234, + "iere": 13235, + "Ġretrie": 13236, + "quality": 13237, + "Ġdiffer": 13238, + "Ġkingdom": 13239, + "ĠLaur": 13240, + "ĠCapitol": 13241, + "Ġconclusions": 13242, + "ĠAltern": 13243, + "ĠNav": 13244, + "Ġtransparent": 13245, + "BER": 13246, + "Group": 13247, + "ĠComplete": 13248, + "Ġinfer": 13249, + "Ġintrig": 13250, + "Ġinsane": 13251, + "RO": 13252, + "ophob": 13253, + "isen": 13254, + "qual": 13255, + "Michael": 13256, + "Ġmuseum": 13257, + "ĠPope": 13258, + "Ġreset": 13259, + "rative": 13260, + "five": 13261, + "Ġaggreg": 13262, + "ittees": 13263, + "ository": 13264, + "Ġcarb": 13265, + "ĠRecord": 13266, + "Ġdecides": 13267, + "ĠFix": 13268, + "Ġexceptions": 13269, + "ĠCommissioner": 13270, + "uns": 13271, + "ĠEnvironmental": 13272, + "Ġlegendary": 13273, + "istence": 13274, + "Ġtunnel": 13275, + "km": 13276, + "Ġinsult": 13277, + "Ġtroll": 13278, + "Ġshake": 13279, + "Ġdetention": 13280, + "ques": 13281, + "ĠChrome": 13282, + "ĠFiles": 13283, + "Ġsubt": 13284, + "Ġprospects": 13285, + "Ġprol": 13286, + "render": 13287, + "proof": 13288, + "Ġperformances": 13289, + "Str": 13290, + "Ġhref": 13291, + "ername": 13292, + "Ġachievement": 13293, + "Ġfut": 13294, + "Full": 13295, + "ĠLeban": 13296, + "google": 13297, + "ãĥĪ": 13298, + "ampa": 13299, + "Maybe": 13300, + "Ġprojected": 13301, + "ĠEmb": 13302, + "Ġcolleg": 13303, + "Ġawards": 13304, + "ĠâĶ": 13305, + "Gold": 13306, + "ĠBlake": 13307, + "ĠRaj": 13308, + "ifting": 13309, + "Ġpending": 
13310, + "Ġinstinct": 13311, + "Ġdevelopments": 13312, + "Connect": 13313, + "ĠMand": 13314, + "ĠWITH": 13315, + "ĠPhilippines": 13316, + "profile": 13317, + "Ġaltogether": 13318, + "ĠBund": 13319, + "ĠTD": 13320, + "oooo": 13321, + "amped": 13322, + "iph": 13323, + "Ġsteam": 13324, + "Ġoldest": 13325, + "Ġdetection": 13326, + "ulpt": 13327, + "Ġç": 13328, + "ĠWayne": 13329, + "2006": 13330, + "fa": 13331, + "Ġcircles": 13332, + "ĠFu": 13333, + "Ġdonors": 13334, + "appropriate": 13335, + "ĠDakota": 13336, + "jamin": 13337, + "Ġmotivated": 13338, + "Ġpurchases": 13339, + "ĠLouisiana": 13340, + "ĠSpl": 13341, + "Ġglobe": 13342, + "Ġ105": 13343, + "zip": 13344, + "call": 13345, + "Ġdepartments": 13346, + "Ġsustainable": 13347, + "105": 13348, + "ĠOP": 13349, + "ifiers": 13350, + "Ġprevented": 13351, + "Ġincomp": 13352, + "ĠCommander": 13353, + "Ġdominated": 13354, + "Ġ»": 13355, + "Ġinvested": 13356, + "Ġcomplexity": 13357, + "Ġincl": 13358, + "Ġensuring": 13359, + "Ġrealm": 13360, + "ync": 13361, + "ĠIndependent": 13362, + "rained": 13363, + "ĠJen": 13364, + "ĠFlight": 13365, + "Ġathe": 13366, + "Ġspeculation": 13367, + "ĠTE": 13368, + "ocate": 13369, + "tic": 13370, + "Ġplaint": 13371, + "herry": 13372, + "Ġtoy": 13373, + "Ġ111": 13374, + "Ġplates": 13375, + "status": 13376, + "ĠIsa": 13377, + "Ġdevoted": 13378, + "Cop": 13379, + "ĠES": 13380, + "255": 13381, + "urrency": 13382, + "Main": 13383, + "Ġslaves": 13384, + "Ġpepper": 13385, + "Ġquotes": 13386, + "Ġceiling": 13387, + "ĠFish": 13388, + "Ġtransformation": 13389, + "Ġfraction": 13390, + "Ġadvantages": 13391, + "Ġtoile": 13392, + "Ġstunning": 13393, + "Ġmoist": 13394, + "breaking": 13395, + "si": 13396, + "ĠLocation": 13397, + "ĠMedium": 13398, + "Ġtexts": 13399, + "Ġugly": 13400, + "Ġbio": 13401, + ".âĢĶ": 13402, + "ĠBased": 13403, + "Ġtrains": 13404, + "ĠWing": 13405, + "ĠAncient": 13406, + "ĠRecords": 13407, + "ĠHope": 13408, + "Special": 13409, + "adesh": 13410, + "obi": 13411, + "[/": 13412, + "Ġtemporarily": 13413, + "Ver": 13414, + "hu": 13415, + "oser": 13416, + "Ġovernight": 13417, + "Ġmamm": 13418, + "ĠTreasury": 13419, + "ĠVenezuel": 13420, + "ĠMega": 13421, + "Ġtar": 13422, + "Ġexpects": 13423, + "black": 13424, + "orph": 13425, + "\\\\\\\\": 13426, + "Ġacceptance": 13427, + "Ġradar": 13428, + "sis": 13429, + "Ġjunior": 13430, + "Ġframes": 13431, + "Ġobservation": 13432, + "acies": 13433, + "Power": 13434, + "ĠAdvanced": 13435, + "Mag": 13436, + "ologically": 13437, + "ĠMechan": 13438, + "Ġsentences": 13439, + "Ġanalysts": 13440, + "aughters": 13441, + "forcement": 13442, + "Ġvague": 13443, + "Ġclause": 13444, + "Ġdirectors": 13445, + "Ġevaluate": 13446, + "Ġcabinet": 13447, + "Matt": 13448, + "ĠClassic": 13449, + "Ang": 13450, + "Ġcler": 13451, + "ĠBuck": 13452, + "Ġresearcher": 13453, + "Ġ160": 13454, + "Ġpoorly": 13455, + "Ġexperiencing": 13456, + "ĠPed": 13457, + "ĠManhattan": 13458, + "Ġfreed": 13459, + "Ġthemes": 13460, + "advant": 13461, + "Ġnin": 13462, + "Ġpraise": 13463, + "104": 13464, + "ĠLibya": 13465, + "best": 13466, + "Ġtrusted": 13467, + "Ġcease": 13468, + "Ġdign": 13469, + "Direct": 13470, + "Ġbombing": 13471, + "Ġmigration": 13472, + "ĠSciences": 13473, + "Ġmunicipal": 13474, + "ĠAverage": 13475, + "Ġglory": 13476, + "Ġrevealing": 13477, + "Ġarena": 13478, + "Ġuncertainty": 13479, + "Ġbattlefield": 13480, + "iao": 13481, + "God": 13482, + "Ġcinem": 13483, + "rape": 13484, + "elle": 13485, + "apons": 13486, + "Ġlisting": 13487, + "Ġwaited": 13488, + "Ġspotted": 13489, + "keley": 13490, + "ĠAudio": 13491, 
+ "eor": 13492, + "arding": 13493, + "idding": 13494, + "igma": 13495, + "ĠNeg": 13496, + "Ġlone": 13497, + "Ġ----": 13498, + "exe": 13499, + "deg": 13500, + "Ġtransf": 13501, + "Ġwash": 13502, + "Ġslavery": 13503, + "Ġexploring": 13504, + "ĠWW": 13505, + "atson": 13506, + "Ġencl": 13507, + "lies": 13508, + "ĠCreek": 13509, + "Ġwooden": 13510, + "Manager": 13511, + "ĠBrand": 13512, + "ummy": 13513, + "ĠArthur": 13514, + "Ġbureaucr": 13515, + "Ġblend": 13516, + "arians": 13517, + "Further": 13518, + "Ġsupposedly": 13519, + "Ġwinds": 13520, + "Ġ1979": 13521, + "Ġgravity": 13522, + "Ġanalyses": 13523, + "ĠTravel": 13524, + "ĠVeter": 13525, + "Ġdumb": 13526, + "Ġalternate": 13527, + "gal": 13528, + "Ġconsumed": 13529, + "Ġeffectiveness": 13530, + ".''": 13531, + "Ġpaths": 13532, + "onda": 13533, + "LA": 13534, + "ĠStrong": 13535, + "Ġenables": 13536, + "Ġescaped": 13537, + "Ġ\"\"": 13538, + "Ġ112": 13539, + "Ġ1983": 13540, + "Ġsmiled": 13541, + "Ġtendency": 13542, + "Fire": 13543, + "Ġpars": 13544, + "ĠRoc": 13545, + "Ġlake": 13546, + "Ġfitness": 13547, + "ĠAth": 13548, + "ĠHorn": 13549, + "Ġhier": 13550, + "Ġimpose": 13551, + "mother": 13552, + "Ġpension": 13553, + "icut": 13554, + "borne": 13555, + "iciary": 13556, + "._": 13557, + "ĠSU": 13558, + "Ġpolar": 13559, + "isy": 13560, + "engu": 13561, + "itialized": 13562, + "ATA": 13563, + "write": 13564, + "Ġexercises": 13565, + "ĠDiamond": 13566, + "otypes": 13567, + "Ġharmful": 13568, + "onz": 13569, + "Ġprinting": 13570, + "story": 13571, + "Ġexpertise": 13572, + "ĠGer": 13573, + "Ġtragedy": 13574, + "ĠFly": 13575, + "Ġdivid": 13576, + "ampire": 13577, + "stock": 13578, + "Mem": 13579, + "Ġreign": 13580, + "Ġunve": 13581, + "Ġamend": 13582, + "ĠProphet": 13583, + "Ġmutual": 13584, + "ĠFac": 13585, + "Ġreplacing": 13586, + "Har": 13587, + "ĠCircuit": 13588, + "Ġthroat": 13589, + "ĠShot": 13590, + "Ġbatteries": 13591, + "Ġtoll": 13592, + "Ġaddressing": 13593, + "ĠMedicaid": 13594, + "Ġpupp": 13595, + "ĠNar": 13596, + "olk": 13597, + "Ġequity": 13598, + "MR": 13599, + "ĠHispan": 13600, + "ĠLarge": 13601, + "mid": 13602, + "Dev": 13603, + "Ġexped": 13604, + "Ġdemo": 13605, + "ĠMarshall": 13606, + "ergus": 13607, + "Ġfiber": 13608, + "Ġdivorce": 13609, + "ĠCreate": 13610, + "Ġslower": 13611, + "ĠParker": 13612, + "ĠStudent": 13613, + "ĠTraining": 13614, + "Return": 13615, + "ĠTru": 13616, + "Ġcub": 13617, + "ĠReached": 13618, + "Ġpanic": 13619, + "Ġquarters": 13620, + "Ġrect": 13621, + "Ġtreating": 13622, + "Ġrats": 13623, + "ĠChristianity": 13624, + "oler": 13625, + "Ġsacred": 13626, + "Ġdeclare": 13627, + "ulative": 13628, + "eting": 13629, + "Ġdelivering": 13630, + "estone": 13631, + "Ġtel": 13632, + "ĠLarry": 13633, + "Ġmeta": 13634, + "accept": 13635, + "artz": 13636, + "ĠRoger": 13637, + "handed": 13638, + "Ġheader": 13639, + "Ġtrapped": 13640, + "ĠCentury": 13641, + "Ġknocked": 13642, + "ĠOxford": 13643, + "Ġsurvivors": 13644, + "bot": 13645, + "Ġdemonstration": 13646, + "Ġdirt": 13647, + "Ġassists": 13648, + "OME": 13649, + "ĠDraft": 13650, + "ortunate": 13651, + "folio": 13652, + "pered": 13653, + "usters": 13654, + "gt": 13655, + "ĠLock": 13656, + "Ġjudicial": 13657, + "verted": 13658, + "Ġsecured": 13659, + "outing": 13660, + "ĠBooks": 13661, + "Ġhosting": 13662, + "Ġlifted": 13663, + "length": 13664, + "Ġjer": 13665, + "Ġwheels": 13666, + "ĠRange": 13667, + "umbnails": 13668, + "Ġdiagnosis": 13669, + "tech": 13670, + "ĠStewart": 13671, + "ĠPract": 13672, + "Ġnationwide": 13673, + "Ġdear": 13674, + "Ġobligations": 13675, + "Ġgrows": 
13676, + "Ġmandatory": 13677, + "Ġsuspicious": 13678, + "!'": 13679, + "Apr": 13680, + "Great": 13681, + "Ġmortgage": 13682, + "Ġprosecutor": 13683, + "Ġeditorial": 13684, + "ĠKr": 13685, + "Ġprocessed": 13686, + "ungle": 13687, + "Ġflexibility": 13688, + "Earlier": 13689, + "ĠCart": 13690, + "ĠSug": 13691, + "Ġfocuses": 13692, + "Ġstartup": 13693, + "Ġbreach": 13694, + "ĠTob": 13695, + "cycle": 13696, + "ãĢĮ": 13697, + "rose": 13698, + "Ġbizarre": 13699, + "ãĢį": 13700, + "Ġvegetables": 13701, + "$$": 13702, + "Ġretreat": 13703, + "oshi": 13704, + "ĠShop": 13705, + "ĠGround": 13706, + "ĠStop": 13707, + "ĠHawaii": 13708, + "ĠAy": 13709, + "Perhaps": 13710, + "ĠBeaut": 13711, + "uffer": 13712, + "enna": 13713, + "Ġproductivity": 13714, + "Fixed": 13715, + "control": 13716, + "Ġabsent": 13717, + "ĠCampaign": 13718, + "Green": 13719, + "Ġidentifying": 13720, + "Ġregret": 13721, + "Ġpromoted": 13722, + "ĠSeven": 13723, + "Ġeru": 13724, + "neath": 13725, + "aughed": 13726, + "ĠPin": 13727, + "ĠLiving": 13728, + "Cost": 13729, + "omatic": 13730, + "mega": 13731, + "ĠNig": 13732, + "ocy": 13733, + "Ġinbox": 13734, + "Ġempire": 13735, + "Ġhorizont": 13736, + "Ġbranches": 13737, + "Ġmetaph": 13738, + "Active": 13739, + "edi": 13740, + "ĠFilm": 13741, + "ĠSomething": 13742, + "Ġmods": 13743, + "incial": 13744, + "ĠOriginal": 13745, + "Gen": 13746, + "Ġspirits": 13747, + "Ġearning": 13748, + "Hist": 13749, + "Ġriders": 13750, + "Ġsacrific": 13751, + "MT": 13752, + "ĠVA": 13753, + "ĠSalt": 13754, + "Ġoccupation": 13755, + "ĠMi": 13756, + "Ġdisg": 13757, + "lict": 13758, + "Ġnit": 13759, + "Ġnodes": 13760, + "eem": 13761, + "ĠPier": 13762, + "Ġhatred": 13763, + "psy": 13764, + "ãĥī": 13765, + "Ġtheater": 13766, + "Ġsophisticated": 13767, + "Ġdefended": 13768, + "Ġbesides": 13769, + "Ġthoroughly": 13770, + "ĠMedicare": 13771, + "Ġblamed": 13772, + "arently": 13773, + "Ġcrying": 13774, + "FOR": 13775, + "priv": 13776, + "Ġsinging": 13777, + "ĠIl": 13778, + "Ġcute": 13779, + "oided": 13780, + "olitical": 13781, + "ĠNeuro": 13782, + "å¤": 13783, + "Ġdonation": 13784, + "ĠEagles": 13785, + "ĠGive": 13786, + "Tom": 13787, + "Ġsubstantially": 13788, + "ĠLicense": 13789, + "ĠJa": 13790, + "Ġgrey": 13791, + "ĠAnimal": 13792, + "ĠER": 13793, + "ĠUnd": 13794, + "Ġkeen": 13795, + "Ġconclude": 13796, + "ĠMississippi": 13797, + "Engine": 13798, + "ĠStudios": 13799, + "Press": 13800, + "overs": 13801, + "llers": 13802, + "Ġ350": 13803, + "ĠRangers": 13804, + "Ġrou": 13805, + "erto": 13806, + "Ep": 13807, + "issa": 13808, + "ivan": 13809, + "Ġseal": 13810, + "ĠRegist": 13811, + "display": 13812, + "Ġweaken": 13813, + "uum": 13814, + "ĠCommons": 13815, + "ĠSay": 13816, + "Ġcultures": 13817, + "Ġlaughed": 13818, + "Ġslip": 13819, + "Ġtreatments": 13820, + "izable": 13821, + "mart": 13822, + "ĠRice": 13823, + "Ġbeast": 13824, + "Ġobesity": 13825, + "ĠLaure": 13826, + "iga": 13827, + "Which": 13828, + "holder": 13829, + "Ġelderly": 13830, + "Ġpays": 13831, + "Ġcomplained": 13832, + "Ġcrop": 13833, + "Ġproc": 13834, + "Ġexplosive": 13835, + "ĠFan": 13836, + "ĠArsenal": 13837, + "Author": 13838, + "eful": 13839, + "Ġmeals": 13840, + "Ġ(-": 13841, + "idays": 13842, + "Ġimagination": 13843, + "Ġannually": 13844, + "Ġms": 13845, + "asures": 13846, + "Head": 13847, + "ikh": 13848, + "matic": 13849, + "Ġboyfriend": 13850, + "ĠComputer": 13851, + "Ġbump": 13852, + "Ġsurge": 13853, + "ĠCraig": 13854, + "ĠKirk": 13855, + "Del": 13856, + "mediate": 13857, + "Ġscenarios": 13858, + "ĠMut": 13859, + "ĠStream": 13860, + "Ġcompetitors": 
13861, + "ÙĦ": 13862, + "ĠStanford": 13863, + "ĠResources": 13864, + "azed": 13865, + "bage": 13866, + "Ġorganis": 13867, + "ĠRelease": 13868, + "Ġseparately": 13869, + "Ġhabits": 13870, + "Ġmeasurements": 13871, + "ĠClose": 13872, + "Ġaccompany": 13873, + "Ġgly": 13874, + "Ġtang": 13875, + "ĠRou": 13876, + "Ġplugin": 13877, + "Ġconvey": 13878, + "ĠChallenge": 13879, + "oots": 13880, + "jan": 13881, + "Ġcurs": 13882, + "ĠRelations": 13883, + "keeper": 13884, + "Ġapproaching": 13885, + "ping": 13886, + "Speaking": 13887, + "Ġarrangement": 13888, + "ĠVI": 13889, + "arettes": 13890, + "Ġaffecting": 13891, + "Ġpermits": 13892, + "because": 13893, + "Ġuseless": 13894, + "ĠHus": 13895, + "!!!!": 13896, + "Ġdestroying": 13897, + "Unfortunately": 13898, + "Ġfascinating": 13899, + "Sem": 13900, + "Ġelectoral": 13901, + "Ġtransparency": 13902, + "ĠChaos": 13903, + "Ġvolunteer": 13904, + "Ġstatistical": 13905, + "Ġactivated": 13906, + "rox": 13907, + "Web": 13908, + "HE": 13909, + "ĠHampshire": 13910, + "isive": 13911, + "Map": 13912, + "Ġtrash": 13913, + "ĠLawrence": 13914, + "stick": 13915, + "Cr": 13916, + "Ġrings": 13917, + "EXT": 13918, + "Ġoperational": 13919, + "opes": 13920, + "Does": 13921, + "ĠEvans": 13922, + "Ġwitnessed": 13923, + "Port": 13924, + "Ġlaunching": 13925, + "econom": 13926, + "wear": 13927, + "ĠParticip": 13928, + "umm": 13929, + "cules": 13930, + "ĠRAM": 13931, + "ĠTun": 13932, + "Ġassured": 13933, + "Ġbinary": 13934, + "Ġbetray": 13935, + "Ġexploration": 13936, + "ĠFel": 13937, + "Ġadmission": 13938, + "itated": 13939, + "Sy": 13940, + "Ġavoided": 13941, + "ĠSimulator": 13942, + "Ġcelebrated": 13943, + "ĠElectric": 13944, + "¥ŀ": 13945, + "Ġcluster": 13946, + "itzerland": 13947, + "health": 13948, + "Line": 13949, + "ĠNash": 13950, + "aton": 13951, + "Ġspare": 13952, + "Ġenterprise": 13953, + "ĠDIS": 13954, + "cludes": 13955, + "Ġflights": 13956, + "Ġregards": 13957, + "ĠÃĹ": 13958, + "half": 13959, + "Ġtrucks": 13960, + "Ġcontacts": 13961, + "Ġuncons": 13962, + "ĠClimate": 13963, + "Ġimmense": 13964, + "NEW": 13965, + "occ": 13966, + "ective": 13967, + "Ġembod": 13968, + "Ġpatrol": 13969, + "Ġbeside": 13970, + "Ġviable": 13971, + "Ġcreep": 13972, + "Ġtriggered": 13973, + "verning": 13974, + "Ġcomparable": 13975, + "ql": 13976, + "Ġgaining": 13977, + "asses": 13978, + "Ġ();": 13979, + "ĠGrey": 13980, + "ĠMLS": 13981, + "sized": 13982, + "Ġprosper": 13983, + "\"?": 13984, + "Ġpolling": 13985, + "Ġshar": 13986, + "ĠRC": 13987, + "Ġfirearm": 13988, + "orient": 13989, + "Ġfence": 13990, + "Ġvariations": 13991, + "giving": 13992, + "ĠPi": 13993, + "ospel": 13994, + "Ġpledge": 13995, + "Ġcure": 13996, + "Ġspy": 13997, + "Ġviolated": 13998, + "Ġrushed": 13999, + "Ġstroke": 14000, + "ĠBlog": 14001, + "sels": 14002, + "ĠEc": 14003, + ",''": 14004, + "Ġpale": 14005, + "ĠCollins": 14006, + "terror": 14007, + "ĠCanadians": 14008, + "Ġtune": 14009, + "Ġlaboratory": 14010, + "Ġnons": 14011, + "tarian": 14012, + "Ġdisability": 14013, + "ĠGam": 14014, + "Ġsinger": 14015, + "alg": 14016, + "ĠSenior": 14017, + "Ġtraded": 14018, + "ĠWarrior": 14019, + "Ġinfring": 14020, + "ĠFranklin": 14021, + "Ġstrain": 14022, + "ĠSwedish": 14023, + "Ġseventh": 14024, + "ĠBenn": 14025, + "ĠTell": 14026, + "Ġsyndrome": 14027, + "Ġwondered": 14028, + "iden": 14029, + "++++": 14030, + "igo": 14031, + "Ġpurple": 14032, + "Ġjournalism": 14033, + "Ġrebel": 14034, + "Ġfu": 14035, + "blog": 14036, + "Ġinvite": 14037, + "rencies": 14038, + "ĠContact": 14039, + "Israel": 14040, + "ĠContent": 14041, + "Ġcheer": 14042, 
+ "Ġbedroom": 14043, + "ĠEngineering": 14044, + "ĠQueens": 14045, + "Ġdwell": 14046, + "ĠPlayStation": 14047, + "ĠDim": 14048, + "ĠColon": 14049, + "lr": 14050, + "Ġoperates": 14051, + "Ġmotivation": 14052, + "USA": 14053, + "astered": 14054, + "Core": 14055, + "ĠTruth": 14056, + "olo": 14057, + "OSE": 14058, + "ĠMemory": 14059, + "Ġpredec": 14060, + "Ġanarch": 14061, + "Ġ1920": 14062, + "ĠYam": 14063, + "è": 14064, + "bid": 14065, + "Ġgrateful": 14066, + "Ġexcitement": 14067, + "Ġtreasure": 14068, + "Ġlongest": 14069, + "ctive": 14070, + "Ġdeserves": 14071, + "Ġreserves": 14072, + "Ġcops": 14073, + "ĠOttawa": 14074, + "ĠEgyptian": 14075, + "anked": 14076, + "Ġartif": 14077, + "Ġhypothesis": 14078, + ":/": 14079, + "Ġpurchasing": 14080, + "Ġlovely": 14081, + "HP": 14082, + "Ġdivide": 14083, + "Ġstrictly": 14084, + "Ġquestioning": 14085, + "Ġtaxpayers": 14086, + "ĠJoy": 14087, + "Ġrolls": 14088, + "ĠHeavy": 14089, + "Ġports": 14090, + "Ġmagnetic": 14091, + "Ġinflamm": 14092, + "Ġbrush": 14093, + "tics": 14094, + "âĪĴ": 14095, + "Ġbottles": 14096, + "ppy": 14097, + "Ġpadd": 14098, + "ãĤ¯": 14099, + "million": 14100, + "Ġdevastating": 14101, + "Ġcompiled": 14102, + "Ġmedication": 14103, + "Ġtwelve": 14104, + "ĠPerry": 14105, + "Space": 14106, + "imb": 14107, + "your": 14108, + "Ġleaked": 14109, + "ĠTar": 14110, + "Ġunity": 14111, + "Ġinfected": 14112, + "Ġtraveled": 14113, + "IDE": 14114, + "ĠMcDonald": 14115, + "txt": 14116, + "ĠPrinc": 14117, + "Ġinterven": 14118, + "ĠTaiwan": 14119, + "ĠPow": 14120, + "Ġbearing": 14121, + "ĠThread": 14122, + "Ġzones": 14123, + "izards": 14124, + "unks": 14125, + "Chapter": 14126, + "llor": 14127, + "Ġ·": 14128, + "Ġwounds": 14129, + "Ġdiscretion": 14130, + "Ġsucceeded": 14131, + "iking": 14132, + "Ġiconic": 14133, + "Call": 14134, + "Ġscreening": 14135, + "ĠMis": 14136, + "icts": 14137, + "Ġministers": 14138, + "Ġseparation": 14139, + "Player": 14140, + "Ġbip": 14141, + "Ġbeloved": 14142, + "Ġcounting": 14143, + "ĠEye": 14144, + "around": 14145, + "inging": 14146, + "Ġtablet": 14147, + "Ġoffence": 14148, + "inance": 14149, + "have": 14150, + "ĠInfo": 14151, + "ĠNinja": 14152, + "Ġprotective": 14153, + "ĠCass": 14154, + "Mac": 14155, + "ĠQuality": 14156, + "North": 14157, + "Ġic": 14158, + "ĠCuba": 14159, + "ĠChronicle": 14160, + "ĠProperty": 14161, + "Ġfastest": 14162, + "otos": 14163, + "ĠGerm": 14164, + "OWN": 14165, + "Ġboom": 14166, + "ĠStanley": 14167, + "erguson": 14168, + "Ġclever": 14169, + "Ġenters": 14170, + "mode": 14171, + "terior": 14172, + "ĠSens": 14173, + "Ġlinear": 14174, + "ARK": 14175, + "Ġcomparing": 14176, + "Ġpurely": 14177, + "Ġsafer": 14178, + "ĠPotter": 14179, + "Ġcups": 14180, + "RT": 14181, + "Ġgluc": 14182, + "Ġattributed": 14183, + "Ġdupl": 14184, + "ĠPap": 14185, + "Ġprecious": 14186, + "Ġpa": 14187, + "ictionary": 14188, + "ĠTig": 14189, + "ĠToo": 14190, + "olutions": 14191, + "stan": 14192, + "Ġrobots": 14193, + "Ġlobb": 14194, + "Ġstatute": 14195, + "Ġprevention": 14196, + "western": 14197, + "160": 14198, + "ĠActive": 14199, + "ĠMaria": 14200, + "hal": 14201, + "None": 14202, + "ellar": 14203, + "ĠKB": 14204, + "ĠPartners": 14205, + "ĠSingle": 14206, + "ĠFollowing": 14207, + "ango": 14208, + "acious": 14209, + "Ġthou": 14210, + "Ġkg": 14211, + "Ġinfluential": 14212, + "ĠFriends": 14213, + "Sur": 14214, + "ainted": 14215, + "Ġforums": 14216, + "Ġstarter": 14217, + "Ġcitizenship": 14218, + "ĠElection": 14219, + "onge": 14220, + "otation": 14221, + "osph": 14222, + ";;;;": 14223, + "utical": 14224, + "pur": 14225, + "eren": 
14226, + "Ġaccusations": 14227, + "bitious": 14228, + "abbit": 14229, + "ĠOrd": 14230, + "Posted": 14231, + "irk": 14232, + "Ġsensitivity": 14233, + "iche": 14234, + "ĠAmy": 14235, + "ĠFab": 14236, + "Ġsummit": 14237, + "Ġpedest": 14238, + "Ġrubber": 14239, + "Ġagricultural": 14240, + "Ġcancel": 14241, + "AE": 14242, + "Ġinaug": 14243, + "Ġcontam": 14244, + "Ġfirmly": 14245, + "iw": 14246, + "stage": 14247, + "ĠKan": 14248, + "Ġtier": 14249, + "Ġinvention": 14250, + "Ġtranslated": 14251, + "ĠRules": 14252, + "Box": 14253, + "Twitter": 14254, + "IDS": 14255, + "Ġpizza": 14256, + "Ġdebug": 14257, + "ĠDrop": 14258, + "vs": 14259, + "Ġhorses": 14260, + "big": 14261, + "Ġboring": 14262, + "Ġhood": 14263, + "ĠMcCain": 14264, + "atched": 14265, + "ĠBros": 14266, + "Ġskip": 14267, + "Ġessay": 14268, + "stat": 14269, + "ĠLegends": 14270, + "Ġammunition": 14271, + "auc": 14272, + "Ġshooter": 14273, + "Ġunh": 14274, + "Ġsupplied": 14275, + "Ġgeneric": 14276, + "ĠSK": 14277, + "iban": 14278, + "yrics": 14279, + "Ġ255": 14280, + "Ġclimbing": 14281, + "Former": 14282, + "Ġflip": 14283, + "Ġjumping": 14284, + "Ġfrustration": 14285, + "ĠTerry": 14286, + "Ġneighborhoods": 14287, + "Ġmedian": 14288, + "bean": 14289, + "Ġbrains": 14290, + "Following": 14291, + "Ġshaped": 14292, + "Ġdraws": 14293, + "Ġaltered": 14294, + "Jack": 14295, + "Ġrecipes": 14296, + "Ġskilled": 14297, + "wealth": 14298, + "achi": 14299, + "election": 14300, + "Ġbehaviors": 14301, + "deals": 14302, + "ĠUntil": 14303, + "Fe": 14304, + "Ġdeclaration": 14305, + "marks": 14306, + "ĠBetween": 14307, + "celona": 14308, + "Ġreson": 14309, + "Ġbubble": 14310, + "Among": 14311, + "Ġimperial": 14312, + "GS": 14313, + "Ġfeminist": 14314, + "2005": 14315, + "ĠKyle": 14316, + "Ġaccounting": 14317, + "ĠTele": 14318, + "ĠTyr": 14319, + "Ġconnecting": 14320, + "Ġrehab": 14321, + "ĠPred": 14322, + "sim": 14323, + "Ġmeantime": 14324, + "Ġphysician": 14325, + "MW": 14326, + "ĠCampbell": 14327, + "ĠBrandon": 14328, + "Ġcontributing": 14329, + "ĠRule": 14330, + "ĠWeight": 14331, + "ĠNap": 14332, + "Ġinteractive": 14333, + "Ġvag": 14334, + "Ġhelmet": 14335, + "ĠComb": 14336, + "four": 14337, + "Ġshipped": 14338, + "Ġcompleting": 14339, + "ĠPD": 14340, + "PDATE": 14341, + "Ġspreading": 14342, + "Ġscary": 14343, + "erving": 14344, + "ĠGas": 14345, + "Ġfrank": 14346, + "school": 14347, + "Ġromantic": 14348, + "Ġstabil": 14349, + "Rob": 14350, + "Ġaccurately": 14351, + "Ġacute": 14352, + "ĠHann": 14353, + "Ġsymbols": 14354, + "Ġcivilization": 14355, + "ĠAW": 14356, + "Ġlightning": 14357, + "Ġconsiders": 14358, + "Ġvenue": 14359, + "Ġ×": 14360, + "Ġoven": 14361, + "ĠSF": 14362, + "his": 14363, + "Ġnu": 14364, + "ĠLearn": 14365, + "Ġpeoples": 14366, + "Ġstd": 14367, + "Ġslee": 14368, + "Ġslic": 14369, + "ĠStatistics": 14370, + "Ġcorners": 14371, + "ĠBaker": 14372, + "Ġ:)": 14373, + "mentation": 14374, + "olver": 14375, + "Ġlaughing": 14376, + "ĠTodd": 14377, + "onde": 14378, + "ĠHills": 14379, + "Ġnuts": 14380, + "ĠWoman": 14381, + "plane": 14382, + "Ġliver": 14383, + "ĠInside": 14384, + "Sorry": 14385, + "Ġagrees": 14386, + "Ġfundament": 14387, + "ĠFisher": 14388, + "Ġauction": 14389, + "Ġthreads": 14390, + "glas": 14391, + "ĠBasic": 14392, + "ĠNat": 14393, + "Ġlacking": 14394, + "Ġcelebration": 14395, + "ju": 14396, + "Ġsilly": 14397, + "Euro": 14398, + "Ġtatt": 14399, + "ighty": 14400, + "controlled": 14401, + "Test": 14402, + "ĠSingh": 14403, + "Ġrage": 14404, + "Ġrhyth": 14405, + "offic": 14406, + "ĠPhantom": 14407, + "Ġheadlines": 14408, + "Ġresponding": 
14409, + "ĠMorning": 14410, + "Ġvitamin": 14411, + "Ġboots": 14412, + "ĠSite": 14413, + "alin": 14414, + "pi": 14415, + "Ġviral": 14416, + "ĠUC": 14417, + "DER": 14418, + "ĠSex": 14419, + "Ġstocks": 14420, + "current": 14421, + "Ġchurches": 14422, + "ĠRare": 14423, + "ĠMurphy": 14424, + "Ġdenial": 14425, + "ĠGaming": 14426, + "Ġtoug": 14427, + "Ġnick": 14428, + "Ġmakers": 14429, + "ĠRonald": 14430, + "Ġgenerous": 14431, + "ĠDoc": 14432, + "ĠMorris": 14433, + "Ġtransformed": 14434, + "ĠNormal": 14435, + "Ġ104": 14436, + "ĠKickstarter": 14437, + "ĠUpon": 14438, + "Online": 14439, + "ĠIRS": 14440, + "Ġwrap": 14441, + "Ġloving": 14442, + "Ġarrives": 14443, + "ĠDue": 14444, + "Ġheter": 14445, + "ĠMade": 14446, + "Ġrental": 14447, + "Ġbelongs": 14448, + "Ġattorneys": 14449, + "Ġcrops": 14450, + "Ġmatched": 14451, + "ulum": 14452, + "oline": 14453, + "109": 14454, + "Ġdispar": 14455, + "Ġbuyers": 14456, + "ĠCambridge": 14457, + "Ġethics": 14458, + "roups": 14459, + "Ġjustified": 14460, + "Ġmarginal": 14461, + "Ġrespected": 14462, + "winning": 14463, + "Ġnodded": 14464, + "ĠSerge": 14465, + "ĠFormer": 14466, + "Craft": 14467, + "################": 14468, + "ĠWarner": 14469, + "Ġdash": 14470, + "ete": 14471, + "Ġentert": 14472, + "ĠEscape": 14473, + "outheast": 14474, + "Ġknees": 14475, + "ĠBomb": 14476, + "Ġrug": 14477, + "Pass": 14478, + "Ġattitudes": 14479, + "government": 14480, + "ĠPrior": 14481, + "Ġqualities": 14482, + "Ġnotification": 14483, + "ĠPhone": 14484, + "lie": 14485, + "Ġanticipated": 14486, + "ĠCombat": 14487, + "ĠBarry": 14488, + "Ġ1982": 14489, + "Users": 14490, + "oner": 14491, + "Ġcomputing": 14492, + "ĠConnecticut": 14493, + "Ġlesser": 14494, + "Ġpeers": 14495, + "ĠCu": 14496, + "Ġtechnically": 14497, + "Ġsubmission": 14498, + "ĠUniversal": 14499, + "Ġmanually": 14500, + "ourge": 14501, + "Ġrespondents": 14502, + "ĠBTC": 14503, + "ĠHost": 14504, + "Ġfare": 14505, + "ĠBird": 14506, + "Ġreceipt": 14507, + "also": 14508, + "Ġjack": 14509, + "Ġagriculture": 14510, + "Ġskull": 14511, + "Ġ!=": 14512, + "Ġpassive": 14513, + "ĠCI": 14514, + "Ġsocieties": 14515, + "Ġreminded": 14516, + "Ġinterference": 14517, + "Buy": 14518, + "Ġâľ": 14519, + "gon": 14520, + "Ġscrutiny": 14521, + "ĠWitch": 14522, + "Ġconducting": 14523, + "Ġãĥ": 14524, + "Ġexchanges": 14525, + "ĠMitchell": 14526, + "Ġinhabit": 14527, + "Ġtwist": 14528, + "BD": 14529, + "Ġwherever": 14530, + "groupon": 14531, + "Ġjokes": 14532, + "ĠBenjamin": 14533, + "ĠRandom": 14534, + "frame": 14535, + "ĠLions": 14536, + "Ġhighlighted": 14537, + "ĠArkansas": 14538, + "Ent": 14539, + "Ġpile": 14540, + "Ġprelim": 14541, + "gs": 14542, + "minded": 14543, + "Ġfelony": 14544, + "ĠGA": 14545, + "ĠLuck": 14546, + "Ġpractically": 14547, + "ĠBos": 14548, + "Ġactress": 14549, + "Dam": 14550, + "ĠBou": 14551, + "Ġvisa": 14552, + "Ġembedded": 14553, + "Ġhybrid": 14554, + "Ġearliest": 14555, + "Ġsooner": 14556, + "social": 14557, + "ĠHA": 14558, + "Ġsteep": 14559, + "Ġdisadvant": 14560, + "Ġexploit": 14561, + "ĠEgg": 14562, + "ĠUltra": 14563, + "Ġnecessity": 14564, + "Local": 14565, + "iege": 14566, + "Ġdated": 14567, + "Ġmasses": 14568, + "Ġsubscription": 14569, + "pless": 14570, + "Ġanonym": 14571, + "Ġpresumably": 14572, + "Blue": 14573, + "Their": 14574, + "asketball": 14575, + "ĠPhilip": 14576, + "Ġcomed": 14577, + "loaded": 14578, + "rane": 14579, + "Ġreflection": 14580, + "China": 14581, + "Ġextends": 14582, + "Ġforming": 14583, + "Ġunders": 14584, + "2001": 14585, + "Ġgrat": 14586, + "Ġconcentrations": 14587, + "Ġinsulin": 14588, + 
"Ġsecular": 14589, + "Ġwhilst": 14590, + "Ġwinners": 14591, + "Advertisements": 14592, + "Ġdeliberately": 14593, + "ĠWorking": 14594, + "Ġsink": 14595, + "etics": 14596, + "dale": 14597, + "Ġmandate": 14598, + "Ġgram": 14599, + "Ġvacation": 14600, + "Ġwarnings": 14601, + "ripp": 14602, + "ĠTHAT": 14603, + "Ġcommentary": 14604, + "Ġintu": 14605, + "Ġaest": 14606, + "Ġreasoning": 14607, + "Ġbreakdown": 14608, + "ĠZombie": 14609, + "Ġ-->": 14610, + "ĠPolitical": 14611, + "cott": 14612, + "Ġthrust": 14613, + "Ġtechnological": 14614, + "Ġdeciding": 14615, + "Ġtrafficking": 14616, + "Long": 14617, + "Welcome": 14618, + "prising": 14619, + "ĠCommunications": 14620, + "Ġendors": 14621, + "Ġswift": 14622, + "Ġmetabol": 14623, + "coins": 14624, + "resa": 14625, + "ĠHTTP": 14626, + "Ġenroll": 14627, + "ĠHappy": 14628, + "usr": 14629, + "intage": 14630, + "Ġ[\"": 14631, + "uably": 14632, + "ĠMaterial": 14633, + "Ġrepeal": 14634, + "Sept": 14635, + "kh": 14636, + "ĠModi": 14637, + "Ġunderneath": 14638, + "ĠIL": 14639, + "shore": 14640, + "Ġdiagnosed": 14641, + "aceutical": 14642, + "Ġshower": 14643, + "aux": 14644, + "ĠSwitch": 14645, + "ĠStrength": 14646, + "Ġjihad": 14647, + "national": 14648, + "Ġtrauma": 14649, + "ussy": 14650, + "oni": 14651, + "Ġconsolid": 14652, + "Ġcalories": 14653, + "ĠFlynn": 14654, + "agged": 14655, + "168": 14656, + "ĠPink": 14657, + "Ġfulfill": 14658, + "Ġchains": 14659, + "Ġnotably": 14660, + "ĠAV": 14661, + "Life": 14662, + "ĠChuck": 14663, + "mus": 14664, + "ĠUrban": 14665, + "ĠHend": 14666, + "Ġdeposit": 14667, + "ĠSad": 14668, + "Ġaffair": 14669, + "ORK": 14670, + "ieval": 14671, + "ĠFDA": 14672, + "Ġtrop": 14673, + "ĠOverall": 14674, + "Ġvirtue": 14675, + "Ġsatisfaction": 14676, + "aund": 14677, + "Ġlun": 14678, + "ĠSwitzerland": 14679, + "ĠOperation": 14680, + "process": 14681, + "Ġshook": 14682, + "Ġcounties": 14683, + "leased": 14684, + "ĠCharlotte": 14685, + "112": 14686, + "Ġtranscript": 14687, + "Ġredd": 14688, + "push": 14689, + "ĠHey": 14690, + "ĠAnalysis": 14691, + "[\"": 14692, + "Ġalternatives": 14693, + "ardless": 14694, + "Ġeleph": 14695, + "Ġprejud": 14696, + "ĠLeaf": 14697, + "Having": 14698, + "ĠHub": 14699, + "Ġexpressions": 14700, + "ĠVolume": 14701, + "Ġshocking": 14702, + "ĠReds": 14703, + "Ġreadily": 14704, + "Ġplanets": 14705, + "adata": 14706, + "Ġcollapsed": 14707, + "ĠMadrid": 14708, + "Ġirrit": 14709, + "ipper": 14710, + "ĠEnc": 14711, + "ĠWire": 14712, + "Ġbuzz": 14713, + "ĠGP": 14714, + "asha": 14715, + "Ġaccidentally": 14716, + "uru": 14717, + "Ġfrustrated": 14718, + "ĠSA": 14719, + "Ġhungry": 14720, + "ĠHuff": 14721, + "Ġlabels": 14722, + "anto": 14723, + "ĠEP": 14724, + "Ġbarriers": 14725, + ")|": 14726, + "ĠBerkeley": 14727, + "ĠJets": 14728, + "Ġpairs": 14729, + "ĠLan": 14730, + "James": 14731, + "ĠBear": 14732, + "Ġhumor": 14733, + "ĠLiberty": 14734, + "Ġmagnitude": 14735, + "Ġaging": 14736, + "ĠMason": 14737, + "Ġfriendship": 14738, + "umbling": 14739, + "Ġemerge": 14740, + "Ġnewspapers": 14741, + "Ġambitious": 14742, + "ĠRichards": 14743, + "aternal": 14744, + "Ġ1981": 14745, + "Ġcookies": 14746, + "Ġsculpt": 14747, + "Ġpursuit": 14748, + "Location": 14749, + "Ġscripts": 14750, + "pc": 14751, + "Ġarrangements": 14752, + "Ġdiameter": 14753, + "Ġloses": 14754, + "amation": 14755, + "Ġliqu": 14756, + "ĠJake": 14757, + "arette": 14758, + "Ġunderstands": 14759, + "ĠZen": 14760, + "vm": 14761, + "Ġapprove": 14762, + "Ġwip": 14763, + "Ġultra": 14764, + "Ġintend": 14765, + "ĠDI": 14766, + "ascular": 14767, + "Ġstays": 14768, + "ĠKor": 
14769, + "ĠKl": 14770, + "Ġinvesting": 14771, + "La": 14772, + "Ġbelieving": 14773, + "bad": 14774, + "mouth": 14775, + "Ġtaxpayer": 14776, + "ãĥĥ": 14777, + "ĠQuebec": 14778, + "Ġlap": 14779, + "ĠSwiss": 14780, + "drop": 14781, + "Ġdrain": 14782, + "iri": 14783, + "etc": 14784, + "ften": 14785, + "ĠNex": 14786, + "Ġstraw": 14787, + "Ġscreaming": 14788, + "Ġcounted": 14789, + "Ġdamaging": 14790, + "Ġambassador": 14791, + "century": 14792, + "Ġprox": 14793, + "Ġarrests": 14794, + "uv": 14795, + "ilateral": 14796, + "ĠCharg": 14797, + "Ġprescribed": 14798, + "Ġindependently": 14799, + "Ġfierce": 14800, + "ĠBaby": 14801, + "Ġbrave": 14802, + "Ġsuits": 14803, + "=>": 14804, + "Ġbaseline": 14805, + "ĠRate": 14806, + "Ġislands": 14807, + "Ġ((": 14808, + "green": 14809, + "ixels": 14810, + "Ġnamely": 14811, + "ĠVillage": 14812, + "than": 14813, + "amy": 14814, + "Version": 14815, + "gmail": 14816, + "entials": 14817, + "ĠSud": 14818, + "ĠMelbourne": 14819, + "Ġarriving": 14820, + "Ġquantum": 14821, + "eff": 14822, + "ropolitan": 14823, + "Tri": 14824, + "Ġfuneral": 14825, + "ĠIR": 14826, + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ": 14827, + "ĠCob": 14828, + "itably": 14829, + "Ġturb": 14830, + "Ġcombo": 14831, + "Review": 14832, + "Ġdeployment": 14833, + "uity": 14834, + "ĠBott": 14835, + "Ġinvisible": 14836, + "Ġrendering": 14837, + "Ġunlocked": 14838, + "Ġaqu": 14839, + "ĠVladimir": 14840, + "Ġpad": 14841, + "ĠBrain": 14842, + "ĠLegacy": 14843, + "dragon": 14844, + "ĠKurdish": 14845, + "Ġsounded": 14846, + "Ġdetained": 14847, + "ĠDM": 14848, + "gary": 14849, + "Ġdaughters": 14850, + "Ġdisturbing": 14851, + "uka": 14852, + "ĠParad": 14853, + "Ġtast": 14854, + "Ġunfortunate": 14855, + "Ġul": 14856, + "emin": 14857, + "Ġattendance": 14858, + "trl": 14859, + "Ġparks": 14860, + "ĠMemorial": 14861, + "ĠAlice": 14862, + "othy": 14863, + "guard": 14864, + "ĠDise": 14865, + "ĠShan": 14866, + "ĠForum": 14867, + "Rich": 14868, + "Ġshifted": 14869, + "uez": 14870, + "Ġlighter": 14871, + "ĠMagn": 14872, + "Ġcod": 14873, + "Sch": 14874, + "hammad": 14875, + "Pub": 14876, + "350": 14877, + "ĠPokemon": 14878, + "Ġprototype": 14879, + "Ġunre": 14880, + "Base": 14881, + "ĠStudents": 14882, + "ĠReply": 14883, + "ĠCommunist": 14884, + "Ġgau": 14885, + "ĠTyler": 14886, + "IZ": 14887, + "Ġparticipated": 14888, + "Ġsuprem": 14889, + "ĠDetails": 14890, + "Ġvessels": 14891, + "rod": 14892, + "Ġtribe": 14893, + "keep": 14894, + "Ġassumptions": 14895, + "Ġpound": 14896, + "Ġcrude": 14897, + "ĠAvailable": 14898, + "Ġswimming": 14899, + "Ġinclusion": 14900, + "Ġadvances": 14901, + "culation": 14902, + "Ġconservation": 14903, + "Ġoverd": 14904, + "ĠBuffalo": 14905, + "Article": 14906, + "edge": 14907, + "Ġawa": 14908, + "ĠMadison": 14909, + "Ġsidew": 14910, + "Ġcatast": 14911, + "ĠKrist": 14912, + "ucle": 14913, + "ĠHighway": 14914, + "ĠTerror": 14915, + "Ġactivation": 14916, + "Ġunconscious": 14917, + "ĠSatan": 14918, + "ĠSusan": 14919, + "illery": 14920, + "Ġarranged": 14921, + "iop": 14922, + "Ġrumors": 14923, + "urring": 14924, + "think": 14925, + "ĠKeith": 14926, + "ĠKind": 14927, + "Ġavoiding": 14928, + "byn": 14929, + "nut": 14930, + "ĠSpeaker": 14931, + "rus": 14932, + "names": 14933, + "Ġguilt": 14934, + "ĠOlympics": 14935, + "Ġsail": 14936, + "ĠMes": 14937, + "levant": 14938, + "ĠColumbus": 14939, + "aft": 14940, + "City": 14941, + "South": 14942, + "ĠHarvey": 14943, + "ĠPun": 14944, + "Several": 14945, + "Ġmentally": 14946, + "Ġimpress": 14947, + "mount": 14948, + "ĠUbuntu": 14949, + "âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ": 14950, + 
"ĠSuperman": 14951, + "ĠMPs": 14952, + "Ġintentions": 14953, + "ĠRacing": 14954, + "Ġlikelihood": 14955, + "Ġ240": 14956, + "Total": 14957, + "Ġtoys": 14958, + "ĠWatson": 14959, + "Ġurge": 14960, + "Lear": 14961, + "ĠPaper": 14962, + "Ġoccurring": 14963, + "ĠBeng": 14964, + "ĠCert": 14965, + "Ġstones": 14966, + "Tim": 14967, + "ĠTwin": 14968, + "zb": 14969, + "ĠDynam": 14970, + "Ġpolitician": 14971, + "kens": 14972, + "ĠEnterprise": 14973, + "UTERS": 14974, + "Ġabol": 14975, + "Ġrefresh": 14976, + "Ġarbitrary": 14977, + "pection": 14978, + "Ġtroubles": 14979, + "Ġ});": 14980, + "tv": 14981, + "Ġpilots": 14982, + "Ġdistribute": 14983, + "Ġaudit": 14984, + "Ġpause": 14985, + "original": 14986, + "Ġrivals": 14987, + "£": 14988, + "Fig": 14989, + "TL": 14990, + "abil": 14991, + "rying": 14992, + "Lin": 14993, + "ioned": 14994, + "lon": 14995, + "Ġfancy": 14996, + "Ġcrashed": 14997, + "Ġtract": 14998, + "Ġshed": 14999, + "Ġconsume": 15000, + "Based": 15001, + "download": 15002, + "init": 15003, + "Ġvoltage": 15004, + "Introdu": 15005, + "Ġcondemned": 15006, + "ĠFinance": 15007, + "respect": 15008, + "Ġexcluded": 15009, + "Ġestablishing": 15010, + "heric": 15011, + "Ġheritage": 15012, + "Ġspectacular": 15013, + "Ġunst": 15014, + "ĠSnowden": 15015, + "ĠLane": 15016, + "San": 15017, + "Ġprotections": 15018, + "struction": 15019, + "incinn": 15020, + "Ġmacro": 15021, + "Custom": 15022, + "iosity": 15023, + "Ġesp": 15024, + "Ġfunctioning": 15025, + "Ġmush": 15026, + "Ġpuzzle": 15027, + "Ġethical": 15028, + "Mal": 15029, + "Ġgoverning": 15030, + "ĠFerguson": 15031, + "Ġrestored": 15032, + "Ġstressed": 15033, + "ĠCounter": 15034, + "ĠKas": 15035, + "clip": 15036, + "ANS": 15037, + "Ġseiz": 15038, + "UK": 15039, + "byss": 15040, + "oldown": 15041, + "api": 15042, + "Ġpermanently": 15043, + "ounters": 15044, + "West": 15045, + "Through": 15046, + "Light": 15047, + "atoes": 15048, + "Ġneat": 15049, + "Ġcord": 15050, + "urer": 15051, + "Ġseverely": 15052, + "ĠAven": 15053, + "Ġinterrog": 15054, + "Ġtriple": 15055, + "Given": 15056, + "Number": 15057, + "Ġarise": 15058, + "Ġsher": 15059, + "plant": 15060, + "Ġflower": 15061, + "ĠCou": 15062, + "Ġate": 15063, + "Ġnewer": 15064, + "bul": 15065, + "Ġmeanwhile": 15066, + "ĠLair": 15067, + "Ġadjustment": 15068, + "ĠCopyright": 15069, + "Ġdivers": 15070, + "iological": 15071, + "Ġgamers": 15072, + "oat": 15073, + "Ġhistorically": 15074, + "Ġanalog": 15075, + "Ġlongtime": 15076, + "Ġprescription": 15077, + "ĠMist": 15078, + "ĠHyper": 15079, + "ĠMaine": 15080, + "ĠDeity": 15081, + "Ġmultipl": 15082, + "ĠReincarn": 15083, + "ĠHyd": 15084, + "ĠPic": 15085, + "Sil": 15086, + "rants": 15087, + "ĠCris": 15088, + ".;": 15089, + "({": 15090, + "ependence": 15091, + "Ġrecy": 15092, + "ateur": 15093, + "Ġquad": 15094, + "Ġglob": 15095, + "Ġconced": 15096, + "team": 15097, + "Ġcapitalist": 15098, + "ĠLot": 15099, + "Ġroyal": 15100, + "ĠCyber": 15101, + "Ġblacks": 15102, + "metic": 15103, + "riv": 15104, + "ĠDanny": 15105, + "Ġspo": 15106, + "ĠRO": 15107, + "Ġanimated": 15108, + "rypted": 15109, + "ĠDeputy": 15110, + "Ġrendered": 15111, + "FE": 15112, + "Ġstreak": 15113, + "Ġclouds": 15114, + "ĠDoug": 15115, + "~~~~~~~~": 15116, + "Ġdiscour": 15117, + "ĠVeh": 15118, + "Ġpsychology": 15119, + "ĠJourney": 15120, + "Ġcrystal": 15121, + "ĠFrost": 15122, + "Ġsuspicion": 15123, + "Ġrelate": 15124, + "orus": 15125, + "ĠCrypt": 15126, + "ĠNVIDIA": 15127, + "comed": 15128, + "uting": 15129, + "incinnati": 15130, + "Ġvulnerability": 15131, + "ostic": 15132, + "Ġisolation": 15133, + 
"Ġcooling": 15134, + "ĠCoalition": 15135, + "Ġ119": 15136, + "Four": 15137, + "ĠDeal": 15138, + "Ġâī": 15139, + "semble": 15140, + "rament": 15141, + "ĠBarcelona": 15142, + "Ġ102": 15143, + "Ġcocaine": 15144, + "ocalypse": 15145, + "Feb": 15146, + "ogenic": 15147, + "Ġmutation": 15148, + "Ġcryptoc": 15149, + "ĠKel": 15150, + "ĠGit": 15151, + "ais": 15152, + "Ġsisters": 15153, + "ANK": 15154, + "Ġactivate": 15155, + "Ter": 15156, + "Ġdread": 15157, + "ylon": 15158, + "Ġpropri": 15159, + "Aust": 15160, + "ĠDefault": 15161, + "Ġoutdoor": 15162, + "Ġsheer": 15163, + "ceive": 15164, + "Ġgently": 15165, + "о": 15166, + "Program": 15167, + "ĠâĨĴ": 15168, + "Ġvegan": 15169, + "ĠCrus": 15170, + "Ġresponsibilities": 15171, + "ĠHR": 15172, + "OLD": 15173, + "Ġprevents": 15174, + "Ġstiff": 15175, + "ĠWere": 15176, + "Ġathletic": 15177, + "ĠScore": 15178, + "Ġ):": 15179, + "Ġcolumns": 15180, + "ĠLoc": 15181, + "available": 15182, + "ĠFram": 15183, + "ĠSessions": 15184, + "Ġcompanion": 15185, + "Ġpacks": 15186, + "140": 15187, + "ĠKnights": 15188, + "Ġfart": 15189, + "Ġstreams": 15190, + "Ġshore": 15191, + "Ġappeals": 15192, + "ĠPerformance": 15193, + "haul": 15194, + "ĠStra": 15195, + "ĠNag": 15196, + "103": 15197, + "ĠTransportation": 15198, + "BB": 15199, + "Ev": 15200, + "zan": 15201, + "Public": 15202, + "Ġtwin": 15203, + "ulsion": 15204, + "Mult": 15205, + "Ġelectro": 15206, + "Ġstatue": 15207, + "ationally": 15208, + "ĠNort": 15209, + "Ġinspection": 15210, + "/*": 15211, + "igue": 15212, + "Ġcompassion": 15213, + "ĠTales": 15214, + "ĠStein": 15215, + "ĠScreen": 15216, + "ĠBug": 15217, + "ĠLion": 15218, + "girl": 15219, + "Ġwithdrawal": 15220, + "Ġobjectives": 15221, + "Ġbloody": 15222, + "Ġpreliminary": 15223, + "Ġjacket": 15224, + "Ġdimensions": 15225, + "ĠCool": 15226, + "ĠOccup": 15227, + "Ġwreck": 15228, + "Ġdoubled": 15229, + "anking": 15230, + "Ġ1975": 15231, + "Ġglasses": 15232, + "ĠWang": 15233, + "prov": 15234, + "Path": 15235, + "connected": 15236, + "ĠMulti": 15237, + "ĠNorway": 15238, + "agonist": 15239, + "Ġfeared": 15240, + "Ġtouching": 15241, + "Ġarguably": 15242, + "¯¯¯¯¯¯¯¯": 15243, + "ĠNCAA": 15244, + "chem": 15245, + "Ġspat": 15246, + "ĠWWE": 15247, + "ĠCel": 15248, + "igger": 15249, + "Ġattacker": 15250, + "ĠJoin": 15251, + "object": 15252, + "etta": 15253, + "Ġeliminated": 15254, + "det": 15255, + "Ġdestruct": 15256, + "ĠLucas": 15257, + "ctuary": 15258, + "180": 15259, + "ĠBrady": 15260, + "ĠBlues": 15261, + "Bay": 15262, + "aukee": 15263, + "Ġtimeline": 15264, + "Ġdelegates": 15265, + "written": 15266, + "ufficient": 15267, + "Ġshapes": 15268, + "Copyright": 15269, + "ouble": 15270, + "service": 15271, + "Ġpione": 15272, + "Ġcolleges": 15273, + "Ġrows": 15274, + "Ġspite": 15275, + "Ġassessed": 15276, + "360": 15277, + "Ġlease": 15278, + "Ġconfidential": 15279, + "cker": 15280, + "ĠManning": 15281, + "ĠVoice": 15282, + "Ġsealed": 15283, + "Ġcalculate": 15284, + "NO": 15285, + "ĠAssistant": 15286, + "Ġteenager": 15287, + "ulent": 15288, + "atherine": 15289, + "Ġmock": 15290, + "Ġdiamond": 15291, + "Ġfest": 15292, + "Ġswitched": 15293, + "Ġresume": 15294, + "ĠPuerto": 15295, + "Ġlanes": 15296, + "iration": 15297, + "ĠSimilarly": 15298, + "Ġrod": 15299, + "ĠSel": 15300, + "ĠPalace": 15301, + "ĠLimited": 15302, + "eous": 15303, + "Ġvariant": 15304, + "Ġward": 15305, + "Ġ))": 15306, + "Show": 15307, + "OOK": 15308, + "Alex": 15309, + "ĠNep": 15310, + "bris": 15311, + "ĠWikipedia": 15312, + "Ġexceptional": 15313, + "Ġmanages": 15314, + "ĠDraw": 15315, + "Again": 15316, + 
"Ġcopper": 15317, + "utt": 15318, + "Ġexports": 15319, + "Ġportfolio": 15320, + "Ġelevated": 15321, + "Rated": 15322, + "ĠOtherwise": 15323, + "ĠTact": 15324, + "ĠShel": 15325, + "ĠTX": 15326, + "\"âĢĶ": 15327, + "Ġresur": 15328, + "ĠWa": 15329, + "venant": 15330, + "Ġmonetary": 15331, + "people": 15332, + "Email": 15333, + "Ġfifty": 15334, + "ĠSweet": 15335, + "ĠMalaysia": 15336, + "Ġconfusing": 15337, + "ĠRio": 15338, + "uda": 15339, + "utenant": 15340, + "\");": 15341, + "Ġpraised": 15342, + "Ġvolumes": 15343, + "turn": 15344, + "Ġmature": 15345, + "Ġnonprofit": 15346, + "Ġpassionate": 15347, + "ĠPrivate": 15348, + "Ġ103": 15349, + "Ġdescend": 15350, + "ç¥ŀ": 15351, + "uffy": 15352, + "headed": 15353, + "Whether": 15354, + "rien": 15355, + "zech": 15356, + "beit": 15357, + "Ġchrom": 15358, + "ĠMcM": 15359, + "Ġdancing": 15360, + "Ġeleg": 15361, + "ĠNoticed": 15362, + "115": 15363, + "Ġadvocacy": 15364, + "ENTS": 15365, + "ambling": 15366, + "ĠMinor": 15367, + "ĠFinn": 15368, + "Ġpriorities": 15369, + "Ġthereof": 15370, + "ĠStage": 15371, + "ĠRogers": 15372, + "Ġsubstitute": 15373, + "ĠJar": 15374, + "ĠJefferson": 15375, + "Ġlightly": 15376, + "102": 15377, + "ĠLisa": 15378, + "uits": 15379, + "ysical": 15380, + "Ġshifts": 15381, + "Ġdrones": 15382, + "Ġworkplace": 15383, + "Ġresid": 15384, + "ensed": 15385, + "ahn": 15386, + "Ġpreferences": 15387, + "server": 15388, + "Ġdebates": 15389, + "doc": 15390, + "ĠGods": 15391, + "Ġhelicopter": 15392, + "Ġhonour": 15393, + "Ġconsiderably": 15394, + "eded": 15395, + "ĠFemale": 15396, + "ĠAnne": 15397, + "Ġreun": 15398, + "ĠFace": 15399, + "ĠHallow": 15400, + "ĠBudget": 15401, + "Ġcondemn": 15402, + "Ġtender": 15403, + "Prof": 15404, + "ocratic": 15405, + "ĠTurner": 15406, + "ĠAgric": 15407, + "Ġ1976": 15408, + "Ġapt": 15409, + "disc": 15410, + "ĠFighter": 15411, + "ĠAur": 15412, + "Ġgarbage": 15413, + "input": 15414, + "ĠKarl": 15415, + "ĠOliver": 15416, + "ĠLanguage": 15417, + "kn": 15418, + "Non": 15419, + "ĠClar": 15420, + "Ġtraditions": 15421, + "Ġadvertisement": 15422, + "ĠSor": 15423, + "Ġarchive": 15424, + "Ġvillages": 15425, + "750": 15426, + "Ġimplementing": 15427, + "waukee": 15428, + "Ġdietary": 15429, + "Ġswitching": 15430, + "Republic": 15431, + "Ġvelocity": 15432, + "Ġcit": 15433, + "ĠAwards": 15434, + "Ġfinancing": 15435, + "Ġlasted": 15436, + ")]": 15437, + "Ġreminder": 15438, + "Person": 15439, + "Ġprecision": 15440, + "Ġdesigners": 15441, + "ĠFried": 15442, + "ĠBorder": 15443, + "Ġtragic": 15444, + "Ġwield": 15445, + "Ġinitiatives": 15446, + "ĠTank": 15447, + "wer": 15448, + "Ġjoins": 15449, + "Ro": 15450, + "inery": 15451, + "Ġarrow": 15452, + "Ġgenerating": 15453, + "founder": 15454, + "Ġsearches": 15455, + "Ġrandomly": 15456, + "Access": 15457, + "Ġbatch": 15458, + "Ġposed": 15459, + "lat": 15460, + "Ġpursuing": 15461, + "asa": 15462, + "Ġtestified": 15463, + "forming": 15464, + "ĠShar": 15465, + "wiki": 15466, + "ĠEither": 15467, + "Sometimes": 15468, + "Ġsenators": 15469, + "ĠJohnny": 15470, + "ĠTaliban": 15471, + "ĠGPS": 15472, + "\":\"/": 15473, + "ãģ®å": 15474, + "Ġanalyzed": 15475, + "ĠRubio": 15476, + "ĠMovement": 15477, + "opard": 15478, + "iii": 15479, + "Stand": 15480, + "fight": 15481, + "Ġignoring": 15482, + "iang": 15483, + "ĠGN": 15484, + "soever": 15485, + "ĠSTAT": 15486, + "Ġrefusing": 15487, + "Ġsweat": 15488, + "Ġbay": 15489, + "PORT": 15490, + "irmed": 15491, + "aky": 15492, + "Ġdispro": 15493, + "Ġlabeled": 15494, + "Ġ108": 15495, + "Hello": 15496, + "Ġpleasant": 15497, + "aba": 15498, + "Ġtriumph": 
15499, + "Ġaboard": 15500, + "Ġincom": 15501, + "ĠCrow": 15502, + "lett": 15503, + "Ġfolk": 15504, + "Ġchase": 15505, + "``": 15506, + "ĠBrus": 15507, + "Ġteens": 15508, + "cue": 15509, + "Ġterrain": 15510, + "hyd": 15511, + "ilight": 15512, + "ORY": 15513, + "Support": 15514, + "ews": 15515, + "lli": 15516, + "raints": 15517, + "ĠCand": 15518, + "Ġabused": 15519, + "achment": 15520, + "larg": 15521, + "Bas": 15522, + "ĠCancer": 15523, + "Ġ1978": 15524, + "Ġsupporter": 15525, + "access": 15526, + "ĠTermin": 15527, + "ĠTampa": 15528, + "ĠANY": 15529, + "Ġnewest": 15530, + "ĠCriminal": 15531, + "edu": 15532, + "Ġ1930": 15533, + "Ġadmits": 15534, + "Ġende": 15535, + "Ġfailures": 15536, + "urate": 15537, + "fulness": 15538, + "cycl": 15539, + "ĠSubject": 15540, + "Ġinfinite": 15541, + "three": 15542, + "WA": 15543, + "pit": 15544, + "ĠInstall": 15545, + "Rad": 15546, + "iliation": 15547, + "GM": 15548, + "Ġcontinent": 15549, + "Ġaccommodate": 15550, + "ĠClay": 15551, + "Ġpup": 15552, + "ĠFunction": 15553, + "Ġhammer": 15554, + "ĠAlberta": 15555, + "Ġrevised": 15556, + "Ġminorities": 15557, + "Ġmeasurement": 15558, + "Connell": 15559, + "Ġdisable": 15560, + "ĠMix": 15561, + "Incre": 15562, + "Ġfork": 15563, + "ĠRosen": 15564, + "Ġimplies": 15565, + "umblr": 15566, + "ANG": 15567, + "Ġproteins": 15568, + "Ġaggression": 15569, + "Ġfacilitate": 15570, + "SN": 15571, + "Ġillegally": 15572, + "uer": 15573, + "Ġacadem": 15574, + "Ġpuzz": 15575, + "ĠShift": 15576, + "pay": 15577, + "ollo": 15578, + "Ġaudiences": 15579, + "Build": 15580, + "Ġnoble": 15581, + "Ġsyntax": 15582, + "âĺħ": 15583, + "Ġbeam": 15584, + "ĠBed": 15585, + "ĠAld": 15586, + "Ġorigins": 15587, + "video": 15588, + "Ġ1977": 15589, + "ĠAssault": 15590, + "Ġgarage": 15591, + "Team": 15592, + "Ġverdict": 15593, + "Ġdwar": 15594, + "ĠVirtual": 15595, + "event": 15596, + "Keep": 15597, + "Ġsentiment": 15598, + "Ġwildlife": 15599, + "shirt": 15600, + "Ġburg": 15601, + "Ġrecommendation": 15602, + "represent": 15603, + "Ġgallery": 15604, + "owners": 15605, + "Ġscholar": 15606, + "Ġconvenience": 15607, + "ĠSwift": 15608, + "Ġconvinc": 15609, + "Cap": 15610, + "Ġwarfare": 15611, + "ĠVisual": 15612, + "Ġconstitute": 15613, + "Ġabort": 15614, + "ĠWeather": 15615, + "ĠLooking": 15616, + "ĠHem": 15617, + "Ġmartial": 15618, + "Ġincoming": 15619, + "etition": 15620, + "Ġtolerance": 15621, + "ĠCreated": 15622, + "Ġflows": 15623, + "ĠElder": 15624, + "Ġsouls": 15625, + "Ġfoul": 15626, + "ĠPain": 15627, + "ĠCAN": 15628, + "Ġ220": 15629, + "bc": 15630, + "hend": 15631, + "Ġgenius": 15632, + "Real": 15633, + "ĠWr": 15634, + "ometer": 15635, + "pad": 15636, + "Ġlimiting": 15637, + "ĠSi": 15638, + "ĠLore": 15639, + "ĠAdventures": 15640, + "Ġvaried": 15641, + "Disc": 15642, + "fin": 15643, + "ĠPersonal": 15644, + "Chris": 15645, + "Ġinvented": 15646, + "Ġdive": 15647, + "ĠRise": 15648, + "Ġoz": 15649, + "ĠComics": 15650, + "Ġexpose": 15651, + "ĠReb": 15652, + "letters": 15653, + "site": 15654, + "imated": 15655, + "Ġhacking": 15656, + "Ġeducated": 15657, + "ĠNobody": 15658, + "Ġdepri": 15659, + "Ġincentive": 15660, + "ãĤ·": 15661, + "Ġoversight": 15662, + "Ġtribes": 15663, + "ĠBelgium": 15664, + "Ġlicensing": 15665, + "ourt": 15666, + "Product": 15667, + "ahl": 15668, + "ĠGem": 15669, + "Ġspecialist": 15670, + "Ġcra": 15671, + "anners": 15672, + "ĠCorbyn": 15673, + "Ġ1973": 15674, + "READ": 15675, + "Ġsummar": 15676, + "Ġoverlook": 15677, + "ĠApplication": 15678, + "Ġinappropriate": 15679, + "Ġdownloaded": 15680, + "Que": 15681, + "ĠBears": 15682, + 
"Ġthumb": 15683, + "ĠCharacter": 15684, + "ĠReincarnated": 15685, + "ĠSid": 15686, + "Ġdemonstrates": 15687, + "sky": 15688, + "ĠBloomberg": 15689, + "ĠArray": 15690, + "ĠResults": 15691, + "ĠFourth": 15692, + "ĠEDT": 15693, + "ĠOscar": 15694, + "cend": 15695, + "Ġ106": 15696, + "ĠNULL": 15697, + "ĠHERE": 15698, + "match": 15699, + "ĠBrun": 15700, + "Ġglucose": 15701, + "ieg": 15702, + "egu": 15703, + "Ġcertified": 15704, + "Ġrelie": 15705, + "Ġhumanitarian": 15706, + "Ġprayers": 15707, + "King": 15708, + "Ġnan": 15709, + "hou": 15710, + "108": 15711, + "ulu": 15712, + "Ġrenewable": 15713, + "Ġdistinguish": 15714, + "Ġdense": 15715, + "ĠVent": 15716, + "ĠPackage": 15717, + "ĠBoss": 15718, + "Ġeditors": 15719, + "Ġmigr": 15720, + "Tra": 15721, + "ĠPeters": 15722, + "ĠArctic": 15723, + "2004": 15724, + "ĠCape": 15725, + "Ġlocally": 15726, + "Ġlasting": 15727, + "Ġhandy": 15728, + ".).": 15729, + "Pan": 15730, + "ĠRES": 15731, + "Index": 15732, + "Ġtensions": 15733, + "Ġformerly": 15734, + "Ġideological": 15735, + "Ġsensors": 15736, + "Ġdealers": 15737, + "Ġdefines": 15738, + "Sk": 15739, + "Ġproceeds": 15740, + "Ġproxy": 15741, + "azines": 15742, + "ĠBash": 15743, + "ĠPad": 15744, + "ĠCraft": 15745, + "ealous": 15746, + "Ġsheets": 15747, + "ometry": 15748, + "June": 15749, + "clock": 15750, + "TT": 15751, + "ĠTheatre": 15752, + "ĠBuzz": 15753, + "Ġchapters": 15754, + "Ġmillenn": 15755, + "Ġdough": 15756, + "ĠCongressional": 15757, + "Ġimagined": 15758, + "avior": 15759, + "Ġclinic": 15760, + "Ġ1945": 15761, + "Ġholder": 15762, + "root": 15763, + "olester": 15764, + "Ġrestart": 15765, + "BN": 15766, + "ĠHamas": 15767, + "ĠJob": 15768, + "Ġorb": 15769, + "Ġram": 15770, + "Ġdisclose": 15771, + "Ġtranslate": 15772, + "Ġimmigrant": 15773, + "Ġannoying": 15774, + "Ġtreaty": 15775, + "anium": 15776, + "ĠTea": 15777, + "ĠLegion": 15778, + "Ġcrowds": 15779, + "ĠBec": 15780, + "ĠAer": 15781, + "ohyd": 15782, + "Bro": 15783, + "Looking": 15784, + "Ġlbs": 15785, + "Ġaggress": 15786, + "Ġseam": 15787, + "Ġintercept": 15788, + "ĠMI": 15789, + "mercial": 15790, + "activ": 15791, + "ĠCit": 15792, + "Ġdimension": 15793, + "Ġconsistency": 15794, + "Ġrushing": 15795, + "ĠDouglas": 15796, + "Ġtrim": 15797, + "Install": 15798, + "icker": 15799, + "Ġshy": 15800, + "106": 15801, + "Ġmentions": 15802, + "pelled": 15803, + "ĠTak": 15804, + "cost": 15805, + "Ġclassroom": 15806, + "Ġfortune": 15807, + "driven": 15808, + "Ġunle": 15809, + "ĠWheel": 15810, + "Ġinvestor": 15811, + "ĠMasters": 15812, + "kit": 15813, + "Ġassociations": 15814, + "ĠEvolution": 15815, + "oping": 15816, + "uscript": 15817, + "Ġprovincial": 15818, + "ĠWalter": 15819, + "avi": 15820, + "SO": 15821, + "Ġunlimited": 15822, + "English": 15823, + "ĠCards": 15824, + "ĠEbola": 15825, + "nered": 15826, + "Ġrevenge": 15827, + "Ġoutright": 15828, + "umper": 15829, + "Ġfitting": 15830, + "ĠSolid": 15831, + "Ġformally": 15832, + "Ġproblematic": 15833, + "Ġhazard": 15834, + "Ġencryption": 15835, + "Ġstraightforward": 15836, + "ĠAK": 15837, + "Ġpse": 15838, + "ĠOrb": 15839, + "ĠChamber": 15840, + "ĠMak": 15841, + "Contents": 15842, + "Ġloyalty": 15843, + "Ġlyrics": 15844, + "ĠSym": 15845, + "Ġwelcomed": 15846, + "Ġcooked": 15847, + "Ġmonop": 15848, + "Ġnurse": 15849, + "Ġmisleading": 15850, + "Ġeternal": 15851, + "Ġshifting": 15852, + "Ġ+=": 15853, + "Vis": 15854, + "Ġinstitutional": 15855, + "illary": 15856, + "Ġpant": 15857, + "VERT": 15858, + "ĠACC": 15859, + "ĠEnh": 15860, + "Ġincon": 15861, + "ĠREUTERS": 15862, + "Ġdonated": 15863, + "âĢ¦âĢ¦âĢ¦âĢ¦": 
15864, + "Intern": 15865, + "Ġexhibit": 15866, + "Ġtire": 15867, + "ĠRic": 15868, + "ĠChampion": 15869, + "ĠMuhammad": 15870, + "NING": 15871, + "ĠSoccer": 15872, + "Ġmobility": 15873, + "Ġvarying": 15874, + "ĠMovie": 15875, + "Ġlord": 15876, + "oak": 15877, + "Field": 15878, + "Ġvector": 15879, + "usions": 15880, + "Ġscrap": 15881, + "Ġenabling": 15882, + "make": 15883, + "Tor": 15884, + ".*": 15885, + "||": 15886, + "ĠWebsite": 15887, + "ĠNPC": 15888, + "Ġsocialist": 15889, + "ĠBilly": 15890, + "ĠAdditional": 15891, + "Ġcargo": 15892, + "Ġfarms": 15893, + "ĠSoon": 15894, + "ĠPrize": 15895, + "Ġmidnight": 15896, + "Ġ900": 15897, + "seen": 15898, + "ĠSpot": 15899, + "Ġsheep": 15900, + "Ġsponsored": 15901, + "ĠHi": 15902, + "ĠJump": 15903, + "Ġ1967": 15904, + "Microsoft": 15905, + "ĠAgent": 15906, + "Ġcharts": 15907, + "dir": 15908, + "Ġadjacent": 15909, + "Ġtricks": 15910, + "Ġmanga": 15911, + "Ġexagger": 15912, + "/>": 15913, + "football": 15914, + "ĠFCC": 15915, + "GC": 15916, + "ĠTier": 15917, + "andra": 15918, + "OUND": 15919, + "%),": 15920, + "Ġfruits": 15921, + "VC": 15922, + "ĠAA": 15923, + "Rober": 15924, + "Ġmidst": 15925, + "âĹ": 15926, + "anka": 15927, + "Ġlegislature": 15928, + "ĠNeil": 15929, + "Ġtourists": 15930, + "\"\"": 15931, + "ĠWarning": 15932, + "ĠNevertheless": 15933, + "ĠOfficial": 15934, + "ĠWhatever": 15935, + "Ġmold": 15936, + "Ġdrafted": 15937, + "Ġsubstances": 15938, + "Ġbreed": 15939, + "Ġtags": 15940, + "ĠTask": 15941, + "Ġverb": 15942, + "Ġmanufactured": 15943, + "comments": 15944, + "ĠPolish": 15945, + "Prov": 15946, + "Ġdetermines": 15947, + "Obama": 15948, + "kers": 15949, + "Ġutterly": 15950, + "Ġsect": 15951, + "sche": 15952, + "ĠGates": 15953, + "ĠChap": 15954, + "Ġaluminum": 15955, + "Ġzombie": 15956, + "ĠTouch": 15957, + "ĠUP": 15958, + "Ġsatisfy": 15959, + "Ġpredomin": 15960, + "ascript": 15961, + "Ġelaborate": 15962, + "Ġ1968": 15963, + "Ġmeasuring": 15964, + "ĠVari": 15965, + "anyahu": 15966, + "Ġsir": 15967, + "ulates": 15968, + "idges": 15969, + "ickets": 15970, + "ĠSpencer": 15971, + "TM": 15972, + "oubted": 15973, + "Ġprey": 15974, + "Ġinstalling": 15975, + "ĠCab": 15976, + "reed": 15977, + "reated": 15978, + "Supp": 15979, + "Ġwrist": 15980, + "ĠKerry": 15981, + "107": 15982, + "ĠKle": 15983, + "ĠRachel": 15984, + "Ġcotton": 15985, + "ĠARE": 15986, + "ĠEle": 15987, + "Control": 15988, + "Ġloads": 15989, + "ĠDod": 15990, + "anas": 15991, + "bone": 15992, + "Ġclassical": 15993, + "ĠRegional": 15994, + "ĠInteg": 15995, + "VM": 15996, + "Ġdesires": 15997, + "Ġautism": 15998, + "supported": 15999, + "ĠMessage": 16000, + "Ġcompact": 16001, + "writer": 16002, + "Ġ109": 16003, + "ĠHurricane": 16004, + "cision": 16005, + "Ġcycles": 16006, + "Ġdrill": 16007, + "Ġcolleague": 16008, + "Ġmaker": 16009, + "German": 16010, + "Ġmistaken": 16011, + "Sun": 16012, + "ĠGay": 16013, + "Ġwhatsoever": 16014, + "Ġsells": 16015, + "ĠAirl": 16016, + "liv": 16017, + "ĠOption": 16018, + "Ġsolved": 16019, + "Ġsectors": 16020, + "Ġhorizontal": 16021, + "Ġequation": 16022, + "ĠSkill": 16023, + "ĠBio": 16024, + "gement": 16025, + "ĠSnap": 16026, + "ĠLegal": 16027, + "Ġtrademark": 16028, + "Ġmakeup": 16029, + "Ġassembled": 16030, + "Ġsaves": 16031, + "ĠHalloween": 16032, + "ĠVermont": 16033, + "ĠFROM": 16034, + "Ġfarming": 16035, + "ĠPodcast": 16036, + "acceptable": 16037, + "ĠHigher": 16038, + "Ġasleep": 16039, + "ullivan": 16040, + "Ġreferen": 16041, + "ĠLev": 16042, + "Ġbullets": 16043, + "oko": 16044, + "HC": 16045, + "Ġstairs": 16046, + "Ġmaintains": 16047, + "ĠLower": 
16048, + "ĠVi": 16049, + "Ġmarine": 16050, + "Ġacres": 16051, + "Ġcoordinator": 16052, + "ĠJoh": 16053, + "Ġcounterparts": 16054, + "ĠBrothers": 16055, + "Ġindict": 16056, + "bra": 16057, + "Ġchunk": 16058, + "Ġcents": 16059, + "Home": 16060, + "ĠMonth": 16061, + "Ġaccordingly": 16062, + "ifles": 16063, + "ĠGermans": 16064, + "ĠSyn": 16065, + "Hub": 16066, + "Ġeyeb": 16067, + "âĶĢâĶĢâĶĢâĶĢ": 16068, + "Ġranges": 16069, + "ĠHolland": 16070, + "ĠRobot": 16071, + "fc": 16072, + "Mike": 16073, + "Ġplasma": 16074, + "Ġswap": 16075, + "Ġathlete": 16076, + "ĠRams": 16077, + ",'\"": 16078, + "Ġinfections": 16079, + "Ġcorrid": 16080, + "Ġvib": 16081, + "Ġpatches": 16082, + "Ġtraditionally": 16083, + "Ġrevelation": 16084, + "Ġsweep": 16085, + "Ġglance": 16086, + "Ġinex": 16087, + "2003": 16088, + "ĠRaw": 16089, + "working": 16090, + "osures": 16091, + "ĠDat": 16092, + "ĠLynch": 16093, + "Ġleverage": 16094, + "ĠReid": 16095, + "Ġcorrelation": 16096, + "iances": 16097, + "avascript": 16098, + "Ġrepository": 16099, + "retty": 16100, + "Ġ1972": 16101, + "240": 16102, + "Ġoun": 16103, + "pol": 16104, + "ĠReed": 16105, + "Ġtactical": 16106, + "isite": 16107, + "Apple": 16108, + "ĠQuinn": 16109, + "Ġraped": 16110, + "illo": 16111, + "Europe": 16112, + "Ġalgorithms": 16113, + "ĠRodrig": 16114, + "iu": 16115, + "Ġillum": 16116, + "Ġfame": 16117, + "Ġintroducing": 16118, + "Ġdelays": 16119, + "ĠRaiders": 16120, + "Ġwhistle": 16121, + "Ġnovels": 16122, + "ĠReally": 16123, + "Ġderiv": 16124, + "Ġpublications": 16125, + "ĠNeither": 16126, + "ĠCommerce": 16127, + "Ġaston": 16128, + "language": 16129, + "Notes": 16130, + "ĠRoth": 16131, + "ĠFear": 16132, + "Ġmate": 16133, + "Ġparade": 16134, + "ĠQB": 16135, + "Ġmaneu": 16136, + "ĠCincinnati": 16137, + "mitting": 16138, + "Ġwaist": 16139, + "ĠRew": 16140, + "Ġdiscont": 16141, + "а": 16142, + "Ġstaring": 16143, + "Ġalias": 16144, + "Ġsecurities": 16145, + "Ġtoilet": 16146, + "ĠJedi": 16147, + "Ġunlaw": 16148, + "vised": 16149, + "////////": 16150, + "](": 16151, + "ĠWeiss": 16152, + "Ġprest": 16153, + "ĠCompan": 16154, + "Ġmemo": 16155, + "ĠGrace": 16156, + "July": 16157, + "ĠElite": 16158, + "center": 16159, + "ĠStay": 16160, + "Ġgalaxy": 16161, + "Ġtooth": 16162, + "ĠSettings": 16163, + "Ġsubjected": 16164, + "ãĤ¦": 16165, + "Ġlineback": 16166, + "Ġretailers": 16167, + "ĠWant": 16168, + "Ġdangers": 16169, + "Air": 16170, + "Ġvoluntary": 16171, + "eway": 16172, + "Ġinterpreted": 16173, + "otine": 16174, + "ç": 16175, + "Ġpel": 16176, + "Service": 16177, + "ĠEventually": 16178, + "Ġcareers": 16179, + "Ġthreaten": 16180, + "Ġmemor": 16181, + "ĠBradley": 16182, + "ancies": 16183, + "sn": 16184, + "ĠUnknown": 16185, + "National": 16186, + "Ġshadows": 16187, + "ailand": 16188, + "ĠDash": 16189, + "Everyone": 16190, + "izzard": 16191, + "March": 16192, + "=(": 16193, + "Ġpulls": 16194, + "Ġstranger": 16195, + "Ġbackwards": 16196, + "ĠBernard": 16197, + "imensional": 16198, + "Ġchron": 16199, + "Ġtheoretical": 16200, + "ktop": 16201, + "Ġware": 16202, + "ĠInvestig": 16203, + "ĠIniti": 16204, + "ĠOperations": 16205, + "oven": 16206, + "ocide": 16207, + "*/": 16208, + "Ġflames": 16209, + "ĠCash": 16210, + "shit": 16211, + "Ġcab": 16212, + "ĠAnaly": 16213, + "ĠSeah": 16214, + "Ġdefining": 16215, + "Ġordering": 16216, + "Ġimmun": 16217, + "Ġpersistent": 16218, + "ACH": 16219, + "Russian": 16220, + "mans": 16221, + "Ġhind": 16222, + "Ġphotography": 16223, + "©": 16224, + "Ġhug": 16225, + "Ġ107": 16226, + "ĠHence": 16227, + "iots": 16228, + "udeau": 16229, + "Ġsubsidies": 16230, 
+ "Ġroutinely": 16231, + "ĠDevice": 16232, + "itic": 16233, + "Ġdisgust": 16234, + "lander": 16235, + "Ġ1940": 16236, + "Ġassignment": 16237, + "ĠBesides": 16238, + "wick": 16239, + "ĠDust": 16240, + "usc": 16241, + "structed": 16242, + "111": 16243, + "develop": 16244, + "Ġfond": 16245, + "Ġintersection": 16246, + "Ġdignity": 16247, + "Ġcommissioner": 16248, + "Without": 16249, + "reach": 16250, + "Ġcartoon": 16251, + "Ġscales": 16252, + "ãĥŃ": 16253, + "FIG": 16254, + "Ġsurveys": 16255, + "ĠIndonesia": 16256, + "Ġartwork": 16257, + "Ġunch": 16258, + "Ġcycling": 16259, + "unct": 16260, + "auer": 16261, + "orate": 16262, + "ĠObviously": 16263, + "Ġcharacterized": 16264, + "feld": 16265, + "Ġaffirm": 16266, + "Ġinnings": 16267, + "Ġé": 16268, + "Ġaliens": 16269, + "Ġcloth": 16270, + "etooth": 16271, + "ĠCertain": 16272, + "§": 16273, + "Ġdigest": 16274, + "know": 16275, + "ĠXL": 16276, + "Ġpredictions": 16277, + "Ġdin": 16278, + "WAR": 16279, + "Ġaftermath": 16280, + "Example": 16281, + "ĠSuccess": 16282, + "ĠThr": 16283, + "IGN": 16284, + "Ġminer": 16285, + "Bus": 16286, + "Ġclarity": 16287, + "heimer": 16288, + "ĠOUT": 16289, + "ĠSend": 16290, + "ĠCircle": 16291, + "ĠDiet": 16292, + "Ġpronounced": 16293, + "Ġcreators": 16294, + "Ġearthquake": 16295, + "attery": 16296, + "geons": 16297, + "Ġod": 16298, + "Ġlaying": 16299, + "orp": 16300, + "Ult": 16301, + "project": 16302, + "Ġundermin": 16303, + "Ġsequel": 16304, + "Sam": 16305, + "ĠDarkness": 16306, + "Ġreception": 16307, + "bull": 16308, + "YS": 16309, + "ĠVir": 16310, + "Ġsequences": 16311, + "ĠCoin": 16312, + "Ġoutfit": 16313, + "ĠWait": 16314, + "119": 16315, + "Ġdelivers": 16316, + "......": 16317, + "Ġblown": 16318, + "ĠEsc": 16319, + "ĠMath": 16320, + "perm": 16321, + "ĠUl": 16322, + "Ġglim": 16323, + "Ġfacial": 16324, + "Ġgreenhouse": 16325, + "Ġtokens": 16326, + "/-": 16327, + "ĠAnnual": 16328, + "ĠONE": 16329, + "Ġteenage": 16330, + "ĠPhysical": 16331, + "ĠLang": 16332, + "ĠCelt": 16333, + "Ġsued": 16334, + "ividually": 16335, + "Ġpatience": 16336, + "chair": 16337, + "regular": 16338, + "Ġaug": 16339, + "inv": 16340, + "except": 16341, + "ĠLil": 16342, + "Ġnest": 16343, + "fd": 16344, + "sum": 16345, + "ĠChase": 16346, + "Russia": 16347, + "ĠJennifer": 16348, + "Ġoffseason": 16349, + "Overall": 16350, + "Fore": 16351, + "Ġriot": 16352, + "Aud": 16353, + "former": 16354, + "Ġdefenders": 16355, + "ĠCT": 16356, + "iotic": 16357, + "ribly": 16358, + "Ġautomated": 16359, + "Ġpenis": 16360, + "Ġinsist": 16361, + "Ġdiagram": 16362, + "ĠSQL": 16363, + "ĠGarc": 16364, + "Ġwitch": 16365, + "client": 16366, + "ierra": 16367, + "ambers": 16368, + "Ġrecount": 16369, + "far": 16370, + "Very": 16371, + "osterone": 16372, + "Ġappreciated": 16373, + "ĠPerfect": 16374, + "Section": 16375, + "Ġdoses": 16376, + "ocaust": 16377, + "Ġcostly": 16378, + "Ġgrams": 16379, + "ĠShi": 16380, + "Ġwrestling": 16381, + "Ġ1971": 16382, + "Ġtrophy": 16383, + "Ġnerve": 16384, + "ĠKaz": 16385, + "ĠExperience": 16386, + "Ġpledged": 16387, + "Ġplayback": 16388, + "Ġcreativity": 16389, + "bye": 16390, + "Ġattackers": 16391, + "Ġholders": 16392, + "ĠCoach": 16393, + "ĠPhD": 16394, + "Ġtransfers": 16395, + "Ġcolored": 16396, + "ĠHindu": 16397, + "Ġdrown": 16398, + "Ġlistened": 16399, + "ĠWA": 16400, + "iasm": 16401, + "PO": 16402, + "Ġappealing": 16403, + "Ġdisclosed": 16404, + "ĠChicken": 16405, + "agging": 16406, + "Ġpleaded": 16407, + "Ġnavigation": 16408, + "ĠReturns": 16409, + "Ġ[[": 16410, + "ROR": 16411, + "EA": 16412, + "Ġphotographer": 16413, + "ĠRider": 
16414, + "ippers": 16415, + "Ġslice": 16416, + "Ġerect": 16417, + "Ġhed": 16418, + "issance": 16419, + "ĠVikings": 16420, + "urious": 16421, + "Ġappet": 16422, + "oubtedly": 16423, + "Child": 16424, + "Ġauthentic": 16425, + "oos": 16426, + "ĠMaking": 16427, + "Ġannouncing": 16428, + "Ġbod": 16429, + "Ġmeter": 16430, + "ĠNine": 16431, + "ĠRogue": 16432, + "Ġworkforce": 16433, + "Ġrenewed": 16434, + "Ġorganisations": 16435, + "acs": 16436, + "PLE": 16437, + "Short": 16438, + "Ġcompounds": 16439, + "ĠVisit": 16440, + "Ġenvelop": 16441, + "earth": 16442, + "Ġsupportive": 16443, + "ggle": 16444, + "ĠBrussels": 16445, + "ĠGuild": 16446, + "Create": 16447, + "REL": 16448, + "Ġaveraged": 16449, + "Ġ1969": 16450, + "riages": 16451, + "Ġlengthy": 16452, + "Ġforgot": 16453, + "Okay": 16454, + "ĠErd": 16455, + "Ġdealer": 16456, + "Ġrecession": 16457, + "DD": 16458, + "Ġdesperately": 16459, + "Ġhunger": 16460, + "Ġsticks": 16461, + "Ġmph": 16462, + "ĠFaith": 16463, + "Ġintentionally": 16464, + "Ġdemol": 16465, + "ueller": 16466, + "ĠSale": 16467, + "Ġdebris": 16468, + "spring": 16469, + "Ġleap": 16470, + ">>>>": 16471, + "Ġcontainers": 16472, + "selling": 16473, + "ranean": 16474, + "attering": 16475, + "Ġcommented": 16476, + "ĠCM": 16477, + "onut": 16478, + "Ġwoods": 16479, + "especially": 16480, + "Ġorganize": 16481, + "ivic": 16482, + "ĠWoods": 16483, + "anga": 16484, + "squ": 16485, + "Ġmaj": 16486, + "amon": 16487, + "Ġaxis": 16488, + "Ġ1974": 16489, + "ĠDenmark": 16490, + "Ġwarrior": 16491, + "ĠPand": 16492, + "Ġoutlined": 16493, + "ĠBO": 16494, + "insula": 16495, + "zilla": 16496, + "ebook": 16497, + "Ġdare": 16498, + "Ġsearched": 16499, + "Ġnavigate": 16500, + "Sn": 16501, + "writing": 16502, + "Ġunited": 16503, + "Japan": 16504, + "ĠHebrew": 16505, + "Ġflame": 16506, + "Ġrelies": 16507, + "Ġcatching": 16508, + "ĠSho": 16509, + "Ġimprisonment": 16510, + "Ġpockets": 16511, + "Ġclosure": 16512, + "ĠFam": 16513, + "tim": 16514, + "adequ": 16515, + "Activity": 16516, + "Ġrecruiting": 16517, + "ĠWATCH": 16518, + "ĠArgentina": 16519, + "dest": 16520, + "Ġapologize": 16521, + "oro": 16522, + "Ġlacks": 16523, + "Ġtuned": 16524, + "ĠGriffin": 16525, + "Ġinfamous": 16526, + "Ġcelebrity": 16527, + "sson": 16528, + "Ġ----------------------------------------------------------------": 16529, + "ĠIsis": 16530, + "ĠDisplay": 16531, + "Ġcredibility": 16532, + "Ġeconomies": 16533, + "Ġheadline": 16534, + "ĠCowboys": 16535, + "Ġindef": 16536, + "Ġlately": 16537, + "Ġincentives": 16538, + "button": 16539, + "ĠMob": 16540, + "Aut": 16541, + "Ġresigned": 16542, + "ĠOm": 16543, + "camp": 16544, + "Ġprofiles": 16545, + "Ġschemes": 16546, + "olphins": 16547, + "ayed": 16548, + "Clinton": 16549, + "enh": 16550, + "ĠYahoo": 16551, + "Ġabst": 16552, + "Ġank": 16553, + "suits": 16554, + "Ġwished": 16555, + "ĠMarco": 16556, + "udden": 16557, + "Ġsphere": 16558, + "ĠBishop": 16559, + "Ġincorporated": 16560, + "ĠPlant": 16561, + "114": 16562, + "Ġhated": 16563, + "pic": 16564, + "Ġdonate": 16565, + "Ġlined": 16566, + "Ġbeans": 16567, + "Ġstealing": 16568, + "Ġcostume": 16569, + "Ġsheriff": 16570, + "Ġforty": 16571, + "Ġintact": 16572, + "Ġadapted": 16573, + "Ġtravelling": 16574, + "bart": 16575, + "Ġnicely": 16576, + "Ġdried": 16577, + "Ġscal": 16578, + "osity": 16579, + "NOTE": 16580, + "ĠBh": 16581, + "ĠBroncos": 16582, + "ĠIgn": 16583, + "Ġintimate": 16584, + "Ġchemistry": 16585, + "Ġoptimal": 16586, + "Deb": 16587, + "ĠGeneration": 16588, + "Ġ],": 16589, + "ichi": 16590, + "ĠWii": 16591, + "ĠYOUR": 16592, + "ventions": 
16593, + "Write": 16594, + "Ġpopul": 16595, + "unning": 16596, + "ĠWor": 16597, + "Vol": 16598, + "Ġqueen": 16599, + "heads": 16600, + "KK": 16601, + "Ġanalyze": 16602, + "opic": 16603, + "earchers": 16604, + "Ġdot": 16605, + "legraph": 16606, + "astically": 16607, + "Ġupgrades": 16608, + "Ġcares": 16609, + "Ġextending": 16610, + "Ġfreeze": 16611, + "Ġinability": 16612, + "Ġorgans": 16613, + "Ġpretend": 16614, + "Ġoutlet": 16615, + "113": 16616, + "olan": 16617, + "ĠMall": 16618, + "uling": 16619, + "talk": 16620, + "Ġexpressing": 16621, + "ĠAlways": 16622, + "ĠBegin": 16623, + "files": 16624, + "Ġlicenses": 16625, + "%%": 16626, + "ĠMitt": 16627, + "Ġfilters": 16628, + "ĠMilwaukee": 16629, + "GN": 16630, + "Ġunfold": 16631, + "Mo": 16632, + "Ġnutrition": 16633, + "ppo": 16634, + "Bo": 16635, + "Ġfounding": 16636, + "Ġundermine": 16637, + "Ġeasiest": 16638, + "ĠCzech": 16639, + "ĠMack": 16640, + "Ġsexuality": 16641, + "ĠNixon": 16642, + "Win": 16643, + "ĠArn": 16644, + "ĠKin": 16645, + "ãĤ£": 16646, + "icer": 16647, + "Ġfortun": 16648, + "Ġsurfaces": 16649, + "aghd": 16650, + "Ġcarriers": 16651, + "ĠPART": 16652, + "ĠTib": 16653, + "Ġinterval": 16654, + "Ġfrustrating": 16655, + "ĠShip": 16656, + "ĠArmed": 16657, + "ffe": 16658, + "Ġboats": 16659, + "ĠAbraham": 16660, + "inis": 16661, + "Ġsuited": 16662, + "thread": 16663, + "iov": 16664, + "abul": 16665, + "ĠVenezuela": 16666, + "Ġtom": 16667, + "super": 16668, + "Ġcastle": 16669, + "although": 16670, + "ioxide": 16671, + "eches": 16672, + "Ġevolutionary": 16673, + "Ġnegotiate": 16674, + "Ġconfronted": 16675, + "Remember": 16676, + "Ġ170": 16677, + "Such": 16678, + "Ġ911": 16679, + "mult": 16680, + "ĠAbyss": 16681, + "urry": 16682, + "kees": 16683, + "spec": 16684, + "ĠBarbara": 16685, + "Ġbelonging": 16686, + "Ġvillain": 16687, + "istani": 16688, + "Ġaccountable": 16689, + "Ġportions": 16690, + "ĠDecl": 16691, + "Ur": 16692, + "ĠKate": 16693, + "gre": 16694, + "Ġmagazines": 16695, + "UCK": 16696, + "Ġregulate": 16697, + "omon": 16698, + "ĠAlmost": 16699, + "Ġoverview": 16700, + "Ġscram": 16701, + "Ġloot": 16702, + "ĠFitz": 16703, + "Ġcharacteristic": 16704, + "ĠSnake": 16705, + "say": 16706, + "ĠRico": 16707, + "Ġtrait": 16708, + "ĠJoined": 16709, + "aucus": 16710, + "Ġadaptation": 16711, + "ĠAirlines": 16712, + "Ġarchae": 16713, + "ĠIde": 16714, + "Ġbikes": 16715, + "Ġliterary": 16716, + "Ġinfluences": 16717, + "ĠUsed": 16718, + "Creat": 16719, + "Ġplea": 16720, + "ĠDefence": 16721, + "ĠAssass": 16722, + "Ġpond": 16723, + "ULT": 16724, + ")\"": 16725, + "Ġevaluated": 16726, + "Ġobtaining": 16727, + "Ġdemographic": 16728, + "Ġvigil": 16729, + "aley": 16730, + "Ġspouse": 16731, + "ĠSeahawks": 16732, + "respons": 16733, + "ĠBelt": 16734, + "umatic": 16735, + "Ġrises": 16736, + "runner": 16737, + "ĠMichelle": 16738, + "Ġpotent": 16739, + "race": 16740, + "ĠPAC": 16741, + "Find": 16742, + "olesterol": 16743, + "ISS": 16744, + "ĠIntroduced": 16745, + "resses": 16746, + "ignment": 16747, + "Os": 16748, + "ĠTu": 16749, + "ĠDex": 16750, + "icides": 16751, + "Ġsparked": 16752, + "ĠLaura": 16753, + "ĠBryant": 16754, + "Ġsmiling": 16755, + "ĠNexus": 16756, + "Ġdefendants": 16757, + "ĠCatal": 16758, + "Ġdishes": 16759, + "shaped": 16760, + "Ġprolong": 16761, + "mt": 16762, + "($": 16763, + "ãĢĤ": 16764, + "Ġcalculations": 16765, + "ĠSame": 16766, + "Ġpiv": 16767, + "HH": 16768, + "Ġcancelled": 16769, + "Ġgrin": 16770, + "Ġterritories": 16771, + "istically": 16772, + "Come": 16773, + "ĠParent": 16774, + "Project": 16775, + "Ġneglig": 16776, + 
"ĠPrivacy": 16777, + "Ġammo": 16778, + "LECT": 16779, + "olutely": 16780, + "ĠEpic": 16781, + "Ġmisunder": 16782, + "wal": 16783, + "April": 16784, + "mos": 16785, + "pathy": 16786, + "ĠCarson": 16787, + "Ġalbums": 16788, + "ĠEasy": 16789, + "Ġpistol": 16790, + "<<": 16791, + "Ġ\\(": 16792, + "target": 16793, + "help": 16794, + "Ġinterpre": 16795, + "conscious": 16796, + "ĠHousing": 16797, + "ĠJoint": 16798, + "127": 16799, + "Ġbeers": 16800, + "science": 16801, + "ĠFirefox": 16802, + "effective": 16803, + "ĠCabin": 16804, + "ĠOkay": 16805, + "ĠApplic": 16806, + "Ġspacecraft": 16807, + "ĠSR": 16808, + "vet": 16809, + "ĠStrange": 16810, + "SB": 16811, + "Ġcorps": 16812, + "iberal": 16813, + "efficient": 16814, + "Ġprevalence": 16815, + "Ġeconomists": 16816, + "118": 16817, + "Thread": 16818, + "ordable": 16819, + "ODE": 16820, + "ĠCant": 16821, + "=-=-": 16822, + "ifiable": 16823, + "ĠAround": 16824, + "Ġpole": 16825, + "Ġwillingness": 16826, + "CLA": 16827, + "ĠKid": 16828, + "Ġcomplement": 16829, + "Ġscattered": 16830, + "Ġinmates": 16831, + "Ġbleeding": 16832, + "every": 16833, + "Ġqueue": 16834, + "ĠTrain": 16835, + "Ġhij": 16836, + "Ġmelee": 16837, + "pleted": 16838, + "Ġdigit": 16839, + "Ġgem": 16840, + "official": 16841, + "Ġlifting": 16842, + "е": 16843, + "Requ": 16844, + "itutes": 16845, + "Ġpackaging": 16846, + "ĠWorkers": 16847, + "hran": 16848, + "ĠLebanon": 16849, + "olesc": 16850, + "Ġpunished": 16851, + "ĠJuan": 16852, + "Ġjam": 16853, + "ĠDocument": 16854, + "Ġmapping": 16855, + "icates": 16856, + "Ġinevitably": 16857, + "Ġvanilla": 16858, + "ĠTon": 16859, + "Ġwatches": 16860, + "Ġleagues": 16861, + "Ġinitiated": 16862, + "degree": 16863, + "portion": 16864, + "Ġrecalls": 16865, + "Ġruin": 16866, + "Ġmelt": 16867, + "IAN": 16868, + "Ġhem": 16869, + "Exp": 16870, + "Ġbaking": 16871, + "ĠColomb": 16872, + "atible": 16873, + "Ġradius": 16874, + "plug": 16875, + "ĠIF": 16876, + "etically": 16877, + "Ġfict": 16878, + "HER": 16879, + "ĠTap": 16880, + "atinum": 16881, + "Ġink": 16882, + "Ġcoh": 16883, + "ĠWizard": 16884, + "both": 16885, + "tex": 16886, + "Ġspends": 16887, + "ĠCurrently": 16888, + "ĠPit": 16889, + "Ġneurons": 16890, + "ignt": 16891, + "Ġrall": 16892, + "Ġbuses": 16893, + "building": 16894, + "Ġadjustments": 16895, + "Ġcried": 16896, + "iblical": 16897, + "atted": 16898, + "ĠZion": 16899, + "ĠMatter": 16900, + "Ġmeditation": 16901, + "ĠDennis": 16902, + "Ġours": 16903, + "ĠTab": 16904, + "Ġrankings": 16905, + "ortal": 16906, + "Ġadvers": 16907, + "Ġsurrender": 16908, + "ĠGob": 16909, + "cium": 16910, + "omas": 16911, + "imeter": 16912, + "Ġmultiplayer": 16913, + "Ġheroin": 16914, + "Ġoptimistic": 16915, + "Ġindicator": 16916, + "ĠBrig": 16917, + "Ġgrocery": 16918, + "Ġapplicant": 16919, + "ĠRocket": 16920, + "vid": 16921, + "Exception": 16922, + "pent": 16923, + "Ġorganizing": 16924, + "Ġencounters": 16925, + "ĠTOD": 16926, + "Ġjewel": 16927, + "Save": 16928, + "ĠChristie": 16929, + "Ġheating": 16930, + "Ġlazy": 16931, + "ĠCP": 16932, + "Ġcousin": 16933, + "Config": 16934, + "Ġregener": 16935, + "Ġnearest": 16936, + "Ġachieving": 16937, + "ENS": 16938, + "throw": 16939, + "ĠRichmond": 16940, + "antle": 16941, + "2002": 16942, + "Ġanten": 16943, + "bird": 16944, + "133": 16945, + "Ġnarc": 16946, + "raint": 16947, + "unny": 16948, + "ĠHispanic": 16949, + "ournaments": 16950, + "Ġprophe": 16951, + "ĠThailand": 16952, + "ĠTi": 16953, + "Ġinjection": 16954, + "Ġinherit": 16955, + "ravis": 16956, + "Ġmedi": 16957, + "Ġwhoever": 16958, + "ĠDEBUG": 16959, + "GP": 16960, + 
"ĠHud": 16961, + "Card": 16962, + "prom": 16963, + "Ġpor": 16964, + "Ġoverhead": 16965, + "Law": 16966, + "Ġviolate": 16967, + "Ġheated": 16968, + "Ġdescriptions": 16969, + "Ġachievements": 16970, + "ĠBeer": 16971, + "ĠQuant": 16972, + "Was": 16973, + "Ġeighth": 16974, + "ĠIv": 16975, + "Ġspecialized": 16976, + "UPDATE": 16977, + "ĠDelta": 16978, + "Pop": 16979, + "Jul": 16980, + "ĠAsk": 16981, + "ophy": 16982, + "Ġnewsletters": 16983, + "ĠTool": 16984, + "Ġgard": 16985, + "ĠConfeder": 16986, + "ĠGMT": 16987, + "ĠAbbott": 16988, + "Ġimmunity": 16989, + "ĠVM": 16990, + "Islam": 16991, + "Ġimplicit": 16992, + "wd": 16993, + "Ġ1944": 16994, + "ravity": 16995, + "ometric": 16996, + "Ġsurviving": 16997, + "urai": 16998, + "ĠPrison": 16999, + "Ġrust": 17000, + "ĠSketch": 17001, + "Ġbees": 17002, + "ĠTheory": 17003, + "Ġmerit": 17004, + "Tex": 17005, + "chat": 17006, + "Ġmim": 17007, + "Ġpaste": 17008, + "ĠKoch": 17009, + "Ġignorance": 17010, + "ĠShoot": 17011, + "Ġbasement": 17012, + "United": 17013, + "ĠAdvis": 17014, + "height": 17015, + "Ġfoster": 17016, + "Ġdetain": 17017, + "information": 17018, + "Ġneural": 17019, + "';": 17020, + "Ġproves": 17021, + "allery": 17022, + "Ġinvitation": 17023, + "umbers": 17024, + "Ġcattle": 17025, + "Ġbicycle": 17026, + "zi": 17027, + "Ġconsultant": 17028, + "Ġapology": 17029, + "ĠTiger": 17030, + "Ġ123": 17031, + "999": 17032, + "Ġindividually": 17033, + "rt": 17034, + "igion": 17035, + "ĠBrazilian": 17036, + "Ġdisturb": 17037, + "Ġentrepreneurs": 17038, + "Ġforests": 17039, + "cerpt": 17040, + "plates": 17041, + "pher": 17042, + "clipse": 17043, + "Ġtwitter": 17044, + "Ġacids": 17045, + "ographical": 17046, + "hum": 17047, + "ĠBald": 17048, + "ifully": 17049, + "Ġcompiler": 17050, + "ĠDA": 17051, + "Ġdonor": 17052, + "asi": 17053, + "Ġtribal": 17054, + "lash": 17055, + "ĠConfig": 17056, + "Ġapplicants": 17057, + "Ġsalaries": 17058, + "135": 17059, + "Putin": 17060, + "ĠFocus": 17061, + "irs": 17062, + "Ġmisconduct": 17063, + "ĠHaz": 17064, + "Ġeaten": 17065, + "Mobile": 17066, + "Muslim": 17067, + "ĠMarcus": 17068, + "viol": 17069, + "Ġfavorable": 17070, + "Ġstub": 17071, + "adin": 17072, + "ĠHob": 17073, + "Ġfaithful": 17074, + "Ġelectronics": 17075, + "Ġvacuum": 17076, + "wait": 17077, + "backed": 17078, + "economic": 17079, + "dist": 17080, + "Ġtenure": 17081, + "Ġsincere": 17082, + "ĠTogether": 17083, + "ĠWave": 17084, + "Ġprogression": 17085, + "Ġdenying": 17086, + "Ġdistress": 17087, + "braska": 17088, + "third": 17089, + "Ġmixing": 17090, + "Ġcolonial": 17091, + "Ġprivately": 17092, + "Ġunrest": 17093, + "aternity": 17094, + "Ġpremises": 17095, + "anti": 17096, + "gregation": 17097, + "Ġlicence": 17098, + "ĠHind": 17099, + "ĠSamuel": 17100, + "Ġconvincing": 17101, + "ĠAce": 17102, + "ĠRust": 17103, + "ĠNetanyahu": 17104, + "Ġhandles": 17105, + "ĠPatch": 17106, + "oriented": 17107, + "aho": 17108, + "ĠGonz": 17109, + "Ġhackers": 17110, + "claimer": 17111, + "Ġcustoms": 17112, + "ĠGran": 17113, + "fighters": 17114, + "Ġluc": 17115, + "Ġmanuscript": 17116, + "arenthood": 17117, + "Ġdevil": 17118, + "Ġwarriors": 17119, + "Ġoffenders": 17120, + "William": 17121, + "Ġholidays": 17122, + "Ġnightmare": 17123, + "Ġlever": 17124, + "ifferent": 17125, + "Stat": 17126, + "Ġexhibition": 17127, + "puted": 17128, + "ĠPure": 17129, + "Ġalpha": 17130, + "Ġenthusiasm": 17131, + "ĠRepresentatives": 17132, + "EAR": 17133, + "ĠTyp": 17134, + "Ġwheat": 17135, + "ĠAlf": 17136, + "Ġcorrection": 17137, + "Ġevangel": 17138, + "ATT": 17139, + "Miss": 17140, + "Ġsoup": 17141, 
+ "Ġimplied": 17142, + "param": 17143, + "Ġsexy": 17144, + "ĠLux": 17145, + "Ġrepublic": 17146, + "patch": 17147, + "ablish": 17148, + "Ġicons": 17149, + "Ġfathers": 17150, + "ĠGET": 17151, + "ĠCarib": 17152, + "Ġregulated": 17153, + "ĠCohen": 17154, + "ĠBobby": 17155, + "Ġner": 17156, + "Ġbent": 17157, + "ventory": 17158, + "ĠAlong": 17159, + "ĠEST": 17160, + "ĠWallace": 17161, + "Ġmurders": 17162, + "rise": 17163, + "kell": 17164, + "ĠCommonwealth": 17165, + "Ġnasty": 17166, + "eta": 17167, + "ĠMIT": 17168, + "Ġadministered": 17169, + "Ġgenuinely": 17170, + "Editor": 17171, + "nick": 17172, + "Ġhydro": 17173, + "********************************": 17174, + "ĠBle": 17175, + "Ġfines": 17176, + "Ġgorge": 17177, + "ausible": 17178, + "rh": 17179, + "Ġapple": 17180, + "mentioned": 17181, + "Ġrope": 17182, + "otyp": 17183, + "HR": 17184, + "Ġdisappointing": 17185, + "Ġcage": 17186, + "nik": 17187, + "Ġdoubts": 17188, + "ĠFREE": 17189, + "prints": 17190, + "ĠMUST": 17191, + "Ġvendors": 17192, + "ĠInqu": 17193, + "Ġliberals": 17194, + "Ġcontractor": 17195, + "Ġupside": 17196, + "children": 17197, + "Ġtricky": 17198, + "Ġregulators": 17199, + "charged": 17200, + "liter": 17201, + "Ġ***": 17202, + "Ġrebell": 17203, + "lang": 17204, + "Ġlocals": 17205, + "Ġphysicians": 17206, + "Ġhey": 17207, + "arse": 17208, + "tm": 17209, + "ĠLex": 17210, + "Ġbehavioral": 17211, + "successful": 17212, + "FX": 17213, + "Ġbrick": 17214, + "ovic": 17215, + "Ġconform": 17216, + "Ġreviewing": 17217, + "Ġinsights": 17218, + "Ġbiology": 17219, + "ĠRemove": 17220, + "ĠExtra": 17221, + "Ġcommitting": 17222, + "induced": 17223, + "ignty": 17224, + "igm": 17225, + "Ġatomic": 17226, + "Common": 17227, + "ĠEM": 17228, + "ĠPere": 17229, + "ĠItems": 17230, + "eh": 17231, + "Ġpreserved": 17232, + "ĠHood": 17233, + "Ġprisoner": 17234, + "Ġbankruptcy": 17235, + "Ġgren": 17236, + "ushes": 17237, + "Ġexploitation": 17238, + "Ġsignatures": 17239, + "Ġfinan": 17240, + "],\"": 17241, + "ĠMR": 17242, + "Ġmeg": 17243, + "remlin": 17244, + "Ġmusicians": 17245, + "Ġselecting": 17246, + "Ġexamining": 17247, + "INK": 17248, + "lated": 17249, + "Hi": 17250, + "Ġartic": 17251, + "Ġpets": 17252, + "Ġimpair": 17253, + "ĠMAN": 17254, + "Ġtablets": 17255, + "include": 17256, + "Range": 17257, + "Ġcaut": 17258, + "Ġlogs": 17259, + "Ġmounting": 17260, + "Ġunaware": 17261, + "Ġdynamics": 17262, + "ĠPalestine": 17263, + "ĠQuarter": 17264, + "ĠPurple": 17265, + "Ġma": 17266, + "ĠImport": 17267, + "Ġcollections": 17268, + "ciation": 17269, + "Ġsuccessor": 17270, + "Ġclone": 17271, + "Ġaiming": 17272, + "Ġpossessed": 17273, + "Ġsticking": 17274, + "Ġshaking": 17275, + "Ġlocate": 17276, + "ĠHockey": 17277, + "Turn": 17278, + "170": 17279, + "Ġfifteen": 17280, + "ĠHarrison": 17281, + "Ġcontinuously": 17282, + "ĠTC": 17283, + "ĠValent": 17284, + "ĠRescue": 17285, + "Ġbypass": 17286, + "amount": 17287, + "Ġmast": 17288, + "Ġprotects": 17289, + "Ġartistic": 17290, + "Ġsometime": 17291, + "Ġshoe": 17292, + "Ġshouted": 17293, + "ificant": 17294, + "etitive": 17295, + "ĠRegister": 17296, + "ĠJin": 17297, + "Ġconcentrated": 17298, + "lington": 17299, + "onies": 17300, + "Ġgenerator": 17301, + "yrim": 17302, + "ĠArmen": 17303, + "Ġclearing": 17304, + "ido": 17305, + "ĠTW": 17306, + "alph": 17307, + "Ġladies": 17308, + "Hard": 17309, + "Ġdialog": 17310, + "Ġinputs": 17311, + "æľ": 17312, + "Ġposes": 17313, + "Ġslots": 17314, + "ĠPremium": 17315, + "Ġleaks": 17316, + "Ġbosses": 17317, + "Ġ113": 17318, + "course": 17319, + "Acc": 17320, + "ĠNewton": 17321, + 
"ĠAustria": 17322, + "ĠMage": 17323, + "Ġteaches": 17324, + "abad": 17325, + "Ġwears": 17326, + "Ġcyl": 17327, + "Ġcurse": 17328, + "ĠSales": 17329, + "ĠWings": 17330, + "Ġpsy": 17331, + "Ġgaps": 17332, + "ĠIceland": 17333, + "ĠPinterest": 17334, + "Ġlandlord": 17335, + "Ġdefinitions": 17336, + "ĠKer": 17337, + "Ġsufficiently": 17338, + "ĠPence": 17339, + "ĠArchitect": 17340, + "Ġsurpass": 17341, + "Ġ114": 17342, + "Ġsuperhero": 17343, + "ĠDisease": 17344, + "Ġpriests": 17345, + "ĠCulture": 17346, + "Ġdefinitive": 17347, + "Ġsecretly": 17348, + "ĠDance": 17349, + "install": 17350, + "chief": 17351, + "ĠJessica": 17352, + "Would": 17353, + "Updated": 17354, + "Ġlocker": 17355, + "ĠKay": 17356, + "Ġmemorial": 17357, + "è¦": 17358, + "fat": 17359, + "Ġdisgu": 17360, + "Ġflavors": 17361, + "ĠBaseball": 17362, + "ĠResistance": 17363, + "Ġkicks": 17364, + "Ġenv": 17365, + "Ġteenagers": 17366, + "Dark": 17367, + "ĠCAR": 17368, + "Ġhalt": 17369, + "ĠLG": 17370, + "ĠGabriel": 17371, + "Ġfever": 17372, + "Ġsatur": 17373, + "Ġmall": 17374, + "Ġaffiliate": 17375, + "ĠSleep": 17376, + "ĠSpecific": 17377, + "ĠVel": 17378, + "Ġjar": 17379, + "ĠSacred": 17380, + "ĠEdwards": 17381, + "ĠACL": 17382, + "Ġretained": 17383, + "ĠGiant": 17384, + "Ġlimitation": 17385, + "inces": 17386, + "Ġrefusal": 17387, + "ĠTale": 17388, + "ĠButler": 17389, + "Ġaccidents": 17390, + "ĠCSS": 17391, + "Ġimported": 17392, + "ĠCopy": 17393, + "α": 17394, + "ERT": 17395, + "zel": 17396, + "Ġdivisions": 17397, + "hots": 17398, + "ĠAlb": 17399, + "ĠDS": 17400, + "Loader": 17401, + "Washington": 17402, + "atisf": 17403, + "ĠCreative": 17404, + "\\.": 17405, + "ĠAutom": 17406, + "redict": 17407, + "Ġreceptor": 17408, + "ĠCarlos": 17409, + "Method": 17410, + "oka": 17411, + "Ġmalicious": 17412, + "Ġstepping": 17413, + ",[": 17414, + "ĠDad": 17415, + "Ġattraction": 17416, + "ĠEffects": 17417, + "ĠPirate": 17418, + "ĠCer": 17419, + "ĠIndustry": 17420, + "ĠRud": 17421, + "Ġcharter": 17422, + "Ġdining": 17423, + "Ġinsists": 17424, + "Ġconfigure": 17425, + "Ġ(#": 17426, + "ĠSimple": 17427, + "ĠScroll": 17428, + "UTC": 17429, + "175": 17430, + "ĠKon": 17431, + "Ġmarketplace": 17432, + "ĠãĤ": 17433, + "Ġrefres": 17434, + "Ġgates": 17435, + "erred": 17436, + "ĠPod": 17437, + "Ġbehave": 17438, + "Frank": 17439, + "node": 17440, + "Ġendorsed": 17441, + "hett": 17442, + "asive": 17443, + "ĠHomeland": 17444, + "Ġrides": 17445, + "ĠLeave": 17446, + "erness": 17447, + "Ġflooding": 17448, + "AFP": 17449, + "Ġrisen": 17450, + "Ġcontinually": 17451, + "Ġunanim": 17452, + "ĠContract": 17453, + "ĠPas": 17454, + "Ġguided": 17455, + "ĠChile": 17456, + "bd": 17457, + "Ġsucc": 17458, + "ptic": 17459, + "Ġcommittees": 17460, + "ĠLuther": 17461, + "ĠAnyone": 17462, + "Ġsab": 17463, + "124": 17464, + "Ġpixel": 17465, + "ĠBak": 17466, + "ĠTag": 17467, + "ĠBennett": 17468, + "Enter": 17469, + "small": 17470, + "ĠPresidential": 17471, + "Ġpul": 17472, + "Ġcontrace": 17473, + "archive": 17474, + "Ġcoastal": 17475, + "ĠKids": 17476, + "192": 17477, + "âĢ²": 17478, + "icky": 17479, + "INGTON": 17480, + "Ġwolf": 17481, + "ĠStalin": 17482, + "Tur": 17483, + "idget": 17484, + "amas": 17485, + "ĠUnless": 17486, + "Ġsponsor": 17487, + "Ġmorph": 17488, + "ĠChoose": 17489, + "Ġrunner": 17490, + "Ġunbel": 17491, + "Ġmud": 17492, + "ĠMana": 17493, + "Ġdubbed": 17494, + "Ġgodd": 17495, + "urers": 17496, + "window": 17497, + "Ġrelied": 17498, + "Ġcelebrating": 17499, + "osc": 17500, + "Ġ135": 17501, + "Ġlobbying": 17502, + "Ġincomplete": 17503, + "Ġrestriction": 17504, + 
"Ġincap": 17505, + "itus": 17506, + "Ġexpectation": 17507, + "ĠApollo": 17508, + "Ġintens": 17509, + "Ġsync": 17510, + "GH": 17511, + "Ġmanipulation": 17512, + "BY": 17513, + "Ġspear": 17514, + "Ġbreasts": 17515, + "Ġvolcan": 17516, + "ilia": 17517, + "Material": 17518, + "Ġformats": 17519, + "ĠBast": 17520, + "Ġparliamentary": 17521, + "Ġsnake": 17522, + "Ġservants": 17523, + "ĠTrudeau": 17524, + "ĠGrim": 17525, + "ĠArabic": 17526, + "ĠSCP": 17527, + "ĠBoys": 17528, + "station": 17529, + "Ġprospective": 17530, + "orde": 17531, + "initialized": 17532, + "Ġbored": 17533, + "ABLE": 17534, + "Ġaccessed": 17535, + "Ġtaxi": 17536, + "ĠShell": 17537, + "aiden": 17538, + "ursed": 17539, + "inates": 17540, + "ĠInsurance": 17541, + "ĠPete": 17542, + "September": 17543, + "650": 17544, + "Ġadventures": 17545, + "ĠCover": 17546, + "Ġtribute": 17547, + "Ġsketch": 17548, + "Ġempower": 17549, + "ĠØ": 17550, + "ĠGlenn": 17551, + "ĠDaw": 17552, + "=\\\"": 17553, + "ĠPolitics": 17554, + "Ġguides": 17555, + "Ġdioxide": 17556, + "ĠGore": 17557, + "ĠBright": 17558, + "ĠSierra": 17559, + "Ġvalued": 17560, + "cond": 17561, + "Ġpointer": 17562, + "Select": 17563, + "Ġrisky": 17564, + "Ġabsorb": 17565, + "images": 17566, + "Ġrefuses": 17567, + "Ġbonuses": 17568, + "___": 17569, + "Ġhilar": 17570, + "ĠFeatures": 17571, + "220": 17572, + "ĠCollector": 17573, + "Foot": 17574, + "Ġ1964": 17575, + "culus": 17576, + "Ġdawn": 17577, + "Ġworkout": 17578, + "ĠLO": 17579, + "Ġphilosophical": 17580, + "ĠSandy": 17581, + "ĠYouth": 17582, + "Ġliable": 17583, + "Af": 17584, + "blue": 17585, + "Ġoverturn": 17586, + "lessness": 17587, + "ĠTribune": 17588, + "ĠIng": 17589, + "Ġfactories": 17590, + "Ġcatches": 17591, + "Ġprone": 17592, + "Ġmatrix": 17593, + "Ġlogin": 17594, + "Ġinacc": 17595, + "Ġexert": 17596, + "sys": 17597, + "Ġneedle": 17598, + "ĠQur": 17599, + "Ġnotified": 17600, + "oulder": 17601, + "tx": 17602, + "Ġreminds": 17603, + "Ġpublishers": 17604, + "Ġnort": 17605, + "Ġgit": 17606, + "Ġflies": 17607, + "ĠEmily": 17608, + "Ġflowing": 17609, + "ĠAlien": 17610, + "ĠStrateg": 17611, + "Ġhardest": 17612, + "Ġmodification": 17613, + "API": 17614, + "ĠMY": 17615, + "Ġcrashes": 17616, + "stairs": 17617, + "number": 17618, + "Ġurging": 17619, + "channel": 17620, + "ĠFalcon": 17621, + "Ġinhabitants": 17622, + "Ġterrifying": 17623, + "Ġutilize": 17624, + "Ġbanner": 17625, + "Ġcigarettes": 17626, + "Ġsenses": 17627, + "ĠHolmes": 17628, + "Ġpractition": 17629, + "ĠPhillips": 17630, + "otto": 17631, + "Ġcompile": 17632, + "Model": 17633, + "ĠKo": 17634, + "Ġ[]": 17635, + "Americans": 17636, + "ĠTerms": 17637, + "Ġmedications": 17638, + "ĠAna": 17639, + "Ġfundamentally": 17640, + "ĠNotice": 17641, + "Ġweaker": 17642, + "Ġ0000": 17643, + "Ġgarlic": 17644, + "Ġoutbreak": 17645, + "Ġeconomist": 17646, + "ĠBirth": 17647, + "Ġobstacles": 17648, + "arcer": 17649, + "ĠOrthodox": 17650, + "Ġplacebo": 17651, + "ĠCrew": 17652, + "aspberry": 17653, + "ĠAngels": 17654, + "Ġdischarge": 17655, + "Ġdestructive": 17656, + "117": 17657, + "ĠRising": 17658, + "Ġdairy": 17659, + "late": 17660, + "Ġcollision": 17661, + "ĠTigers": 17662, + "eanor": 17663, + "ocumented": 17664, + "ĠInvalid": 17665, + "Ġdont": 17666, + "ĠLiter": 17667, + "ĠVa": 17668, + "Ġhydrogen": 17669, + "Ġvariants": 17670, + "ĠBrowns": 17671, + "Ġ1965": 17672, + "Ġindigenous": 17673, + "Ġtrades": 17674, + "Ġremainder": 17675, + "Ġswept": 17676, + "ĠImpact": 17677, + "Ġredist": 17678, + "Ġunint": 17679, + "graduate": 17680, + "ãĥķ": 17681, + "ĠWILL": 17682, + "ãģ®ç": 17683, + 
"ĠCritical": 17684, + "Ġfisher": 17685, + "Ġvicious": 17686, + "Ġreversed": 17687, + "Year": 17688, + "ĠSox": 17689, + "Ġshootings": 17690, + "Ġfilming": 17691, + "Ġtouchdowns": 17692, + "aires": 17693, + "mel": 17694, + "Ġgrandfather": 17695, + "Ġaffection": 17696, + "ingle": 17697, + "Ġoverly": 17698, + "Additional": 17699, + "Ġsupreme": 17700, + "ĠGrad": 17701, + "Ġsporting": 17702, + "Ġmercy": 17703, + "ĠBrooks": 17704, + "ounty": 17705, + "Ġperforms": 17706, + "Ġtightly": 17707, + "Ġdemons": 17708, + "Ġkillings": 17709, + "Ġfaction": 17710, + "ĠNova": 17711, + "auts": 17712, + "Ġundoubtedly": 17713, + "arin": 17714, + "Ġunderway": 17715, + "rak": 17716, + "Ġliv": 17717, + "ĠRegion": 17718, + "Ġbriefing": 17719, + "sers": 17720, + "cloud": 17721, + "ĠMik": 17722, + "usp": 17723, + "Ġprediction": 17724, + "azor": 17725, + "Ġportable": 17726, + "ĠGand": 17727, + "Ġpresenting": 17728, + "Ġ1080": 17729, + "»": 17730, + "ushi": 17731, + "ĠSpark": 17732, + "thereum": 17733, + "Ġjustification": 17734, + "ĠNy": 17735, + "Ġcontractors": 17736, + "mingham": 17737, + "ĠStyle": 17738, + "åħ": 17739, + "ĠChronicles": 17740, + "ĠPicture": 17741, + "Ġproving": 17742, + "Ġwives": 17743, + "sett": 17744, + "Ġmolecules": 17745, + "ĠFairy": 17746, + "Ġconsisting": 17747, + "Ġpier": 17748, + "alone": 17749, + "inition": 17750, + "Ġnucle": 17751, + "json": 17752, + "Ġgotta": 17753, + "Ġmobil": 17754, + "Ġverbal": 17755, + "arium": 17756, + "Ġmonument": 17757, + "ucked": 17758, + "Ġ256": 17759, + "Tech": 17760, + "minecraft": 17761, + "ĠTrack": 17762, + "Ġtile": 17763, + "Ġcompatibility": 17764, + "asis": 17765, + "Ġsadd": 17766, + "Ġinstructed": 17767, + "ĠMueller": 17768, + "Ġlethal": 17769, + "Ġhormone": 17770, + "Ġorche": 17771, + "else": 17772, + "Ġskelet": 17773, + "Ġentertaining": 17774, + "Ġminimize": 17775, + "again": 17776, + "Ġundergo": 17777, + "Ġconstraints": 17778, + "Ġcigarette": 17779, + "ĠIslamist": 17780, + "Ġtravels": 17781, + "ĠPanthers": 17782, + "lings": 17783, + "Care": 17784, + "Ġlawsuits": 17785, + "uras": 17786, + "Ġcryst": 17787, + "Ġlowered": 17788, + "Ġaerial": 17789, + "Ġcombinations": 17790, + "Ġhaun": 17791, + "Ġcha": 17792, + "Ġvine": 17793, + "Ġquantities": 17794, + "Ġlinking": 17795, + "bank": 17796, + "Ġsoy": 17797, + "Bill": 17798, + "ĠAngela": 17799, + "Ġrecipient": 17800, + "ĠProtest": 17801, + "Ġsocket": 17802, + "Ġsolidarity": 17803, + "ĠâĨ": 17804, + "mill": 17805, + "Ġvaries": 17806, + "ĠPakistani": 17807, + "Dragon": 17808, + "Ġune": 17809, + "Ġhorizon": 17810, + "³³³³³³³³": 17811, + "Ġprovinces": 17812, + "Ġfrankly": 17813, + "Ġenacted": 17814, + "notes": 17815, + "['": 17816, + "Ġ192": 17817, + "ocracy": 17818, + "Ġendorsement": 17819, + "Ġovertime": 17820, + "True": 17821, + "Lab": 17822, + "licted": 17823, + "ĠDNC": 17824, + "Ġbeats": 17825, + "ĠJamie": 17826, + "152": 17827, + "ĠINT": 17828, + "Contact": 17829, + "Ġaccounted": 17830, + "hash": 17831, + "ĠPackers": 17832, + "pires": 17833, + "Ġlesbian": 17834, + "Ġamendments": 17835, + "Ġhopeful": 17836, + "ĠFinland": 17837, + "Ġspotlight": 17838, + "Ġconfigured": 17839, + "Ġtroubled": 17840, + "Ġgaze": 17841, + "ĠCalgary": 17842, + "Ġreliability": 17843, + "Ġinsurg": 17844, + "swer": 17845, + "buy": 17846, + "ĠSkin": 17847, + "Ġpixels": 17848, + "Ġhandgun": 17849, + "Ġparas": 17850, + "Ġcategor": 17851, + "ĠEL": 17852, + "ĠRex": 17853, + "Indeed": 17854, + "Ġkinda": 17855, + "Ġconjunction": 17856, + "ĠBryan": 17857, + "ĠManufact": 17858, + "yang": 17859, + "Plus": 17860, + "SQL": 17861, + "ishment": 17862, + 
"Ġdominate": 17863, + "Ġnail": 17864, + "Ġoath": 17865, + "Ġerupt": 17866, + "ĠFine": 17867, + "itbart": 17868, + "ĠChip": 17869, + "ĠAbd": 17870, + "ĠNam": 17871, + "Ġbuyer": 17872, + "Ġdissent": 17873, + "Leaks": 17874, + "Contin": 17875, + "Ġrider": 17876, + "ĠSomeone": 17877, + "Ġillusion": 17878, + "cin": 17879, + "ĠBoeing": 17880, + "Ġinadequ": 17881, + "ovation": 17882, + "iants": 17883, + "Ġrebuild": 17884, + "450": 17885, + "ĠDestiny": 17886, + "SW": 17887, + "ĠTill": 17888, + "Hit": 17889, + "iaz": 17890, + "ĠBangl": 17891, + "achers": 17892, + "ĠReform": 17893, + "Ġsegments": 17894, + "Ġsystematic": 17895, + "dc": 17896, + "ĠConservatives": 17897, + "Ġportal": 17898, + "hor": 17899, + "ĠDragonbound": 17900, + "Ġdragged": 17901, + "omo": 17902, + "Ġthee": 17903, + "advert": 17904, + "ĠReports": 17905, + "ĠEt": 17906, + "Ġbarrels": 17907, + "August": 17908, + "Ġcomparisons": 17909, + "Ġhex": 17910, + "Ġanthrop": 17911, + "\"[": 17912, + "borough": 17913, + "abi": 17914, + "Ġpictured": 17915, + "playing": 17916, + "ĠAddress": 17917, + "ĠMirror": 17918, + "Smith": 17919, + "Ġtires": 17920, + "ĠNPR": 17921, + "AAAA": 17922, + "Ġclassification": 17923, + "ĠThan": 17924, + "ĠHarm": 17925, + "ĠRA": 17926, + "Ġrejection": 17927, + "mination": 17928, + "Ġranged": 17929, + "ĠFalls": 17930, + "DI": 17931, + "Host": 17932, + "ãĤ´": 17933, + "ĠExample": 17934, + "listed": 17935, + "thirds": 17936, + "Ġsafegu": 17937, + "brand": 17938, + "Ġprobable": 17939, + "Canada": 17940, + "ITION": 17941, + "ĠQaeda": 17942, + "Ġchick": 17943, + "Ġimports": 17944, + "hit": 17945, + "loc": 17946, + "WW": 17947, + "Ġblew": 17948, + "Ġanytime": 17949, + "Ġwholes": 17950, + "iked": 17951, + "Ġcalculation": 17952, + "create": 17953, + "ĠOri": 17954, + "Ġupgraded": 17955, + "Ġappar": 17956, + "utory": 17957, + "ĠMol": 17958, + "Brit": 17959, + "ĠJong": 17960, + "INAL": 17961, + "ĠStarting": 17962, + "Ġdice": 17963, + "urtle": 17964, + "Ġrelying": 17965, + "closure": 17966, + "Ġprofitable": 17967, + "Ġslaughter": 17968, + "ĠManual": 17969, + "caster": 17970, + "Ġ\"$": 17971, + "Ġfeather": 17972, + "ĠSimply": 17973, + "ieves": 17974, + "Ġdeterior": 17975, + "ĠPCI": 17976, + "Ġstamp": 17977, + "Ġflaws": 17978, + "Ġshade": 17979, + "hammer": 17980, + "Ġpassport": 17981, + "Ġconting": 17982, + "amel": 17983, + "Ġobservers": 17984, + "Ġneglect": 17985, + "ĠRB": 17986, + "ĠBrotherhood": 17987, + "Ġskeptical": 17988, + "family": 17989, + "usk": 17990, + "Ġemotionally": 17991, + "âĻ": 17992, + "ĠBeta": 17993, + "asonable": 17994, + "idity": 17995, + "ĠMul": 17996, + "Ġkicking": 17997, + "ĠCarm": 17998, + "ollah": 17999, + "VERTIS": 18000, + "ĠAthen": 18001, + "Ġladder": 18002, + "ĠBullet": 18003, + "å£": 18004, + "0001": 18005, + "ĠWildlife": 18006, + "ĠMask": 18007, + "ĠNan": 18008, + "Rev": 18009, + "Ġunacceptable": 18010, + "legal": 18011, + "Ġcrowded": 18012, + "agi": 18013, + "ĠCox": 18014, + "je": 18015, + "Ġmorality": 18016, + "Ġfuels": 18017, + "Ġcables": 18018, + "Ġmankind": 18019, + "ĠCaribbean": 18020, + "Ġanchor": 18021, + "Ġbyte": 18022, + "ĠOften": 18023, + "ĠOz": 18024, + "Ġcrafted": 18025, + "Ġhistorian": 18026, + "ĠWu": 18027, + "Ġtowers": 18028, + "ĠCitizens": 18029, + "Ġhelm": 18030, + "Ġcredentials": 18031, + "Ġsingular": 18032, + "ĠJesse": 18033, + "Ġtackles": 18034, + "Ġcontempt": 18035, + "Ġafore": 18036, + "ĠShadows": 18037, + "Ġnil": 18038, + "Ġurgent": 18039, + "apple": 18040, + "blood": 18041, + "Ġvon": 18042, + "Ġoffline": 18043, + "Ġbreathe": 18044, + "Ġjumps": 18045, + "Ġirrelevant": 18046, 
+ "oxic": 18047, + "omal": 18048, + "important": 18049, + "Jim": 18050, + "Ġgloves": 18051, + "arming": 18052, + "depth": 18053, + "Ġtalents": 18054, + "ookie": 18055, + "ĠSB": 18056, + "Ġpalm": 18057, + "uffs": 18058, + "esta": 18059, + "IGH": 18060, + "Ġcanon": 18061, + "ĠVerizon": 18062, + "ĠPle": 18063, + "Ġcoupled": 18064, + "velt": 18065, + "Ġfundraising": 18066, + "ĠGetting": 18067, + "ĠDLC": 18068, + "Ġmathematical": 18069, + "ĠHS": 18070, + "ĠCardinals": 18071, + "telling": 18072, + "Ġsponsors": 18073, + "ĠÏ": 18074, + "ĠBulls": 18075, + "option": 18076, + "Ġpropose": 18077, + "Ġmemorable": 18078, + "Ġembraced": 18079, + "Ġdeclining": 18080, + "Health": 18081, + "eda": 18082, + "Ġ};": 18083, + "Ġspam": 18084, + "mile": 18085, + "Ġpitcher": 18086, + "ĠEight": 18087, + "Ġcaring": 18088, + "utic": 18089, + "role": 18090, + "Ġairline": 18091, + "ernandez": 18092, + "ĠAthlet": 18093, + "Ġcertification": 18094, + "uxe": 18095, + "riger": 18096, + "Ġempir": 18097, + "Ġsensation": 18098, + "Ġdism": 18099, + "Ġbolt": 18100, + "Ġevolve": 18101, + "House": 18102, + "Ġconsultation": 18103, + "ĠDuty": 18104, + "Ġtouches": 18105, + "ĠNathan": 18106, + "Ġfaint": 18107, + "had": 18108, + "\"(": 18109, + "ĠConsumer": 18110, + "ĠExtreme": 18111, + "Ġ127": 18112, + "ĠHerm": 18113, + "ĠSacrament": 18114, + "izoph": 18115, + "Ġanxious": 18116, + "ulously": 18117, + "Ġsocially": 18118, + "ĠUTC": 18119, + "Ġsolving": 18120, + "ĠLetter": 18121, + "History": 18122, + "educ": 18123, + "Price": 18124, + "));": 18125, + "Ġreload": 18126, + "amic": 18127, + "Ġpork": 18128, + "Ġdiscourse": 18129, + "Ġtournaments": 18130, + "airo": 18131, + "ĠKur": 18132, + "ĠCosta": 18133, + "Ġviolating": 18134, + "Ġinterfere": 18135, + "Ġrecreational": 18136, + "uffle": 18137, + "Ġspeeches": 18138, + "Ġneeding": 18139, + "Ġremembers": 18140, + "Ġcredited": 18141, + "nia": 18142, + "focused": 18143, + "amera": 18144, + "Ġbru": 18145, + "umbs": 18146, + "ĠCuban": 18147, + "Ġpreceding": 18148, + "Ġnonsense": 18149, + "acial": 18150, + "Ġsmartphones": 18151, + "ĠStories": 18152, + "Sports": 18153, + "ĠEmergency": 18154, + "ouncing": 18155, + "efined": 18156, + "Ġber": 18157, + "Ġconsulting": 18158, + "Ġmasters": 18159, + "heastern": 18160, + ".\"[": 18161, + "ĠRunning": 18162, + "Ġsuscept": 18163, + "ĠFeng": 18164, + "America": 18165, + "prises": 18166, + "stitial": 18167, + "ĠWeekly": 18168, + "ĠGreater": 18169, + "modules": 18170, + "ifter": 18171, + "Graphics": 18172, + "uler": 18173, + "Ġwholly": 18174, + "Ġsuppress": 18175, + "Ġconcealed": 18176, + "Ġhappily": 18177, + "Ġaccepts": 18178, + "ĠEnjoy": 18179, + "Ġrivers": 18180, + "ĠExcept": 18181, + "225": 18182, + "ĠNHS": 18183, + "ĠMcConnell": 18184, + "Ġpussy": 18185, + "ferred": 18186, + "utable": 18187, + "Ġattain": 18188, + "Ġ>=": 18189, + "Ġdeposits": 18190, + "rophic": 18191, + "Ġnotorious": 18192, + "ĠShaw": 18193, + "ilitation": 18194, + "Ġepidemic": 18195, + "allic": 18196, + "Ġsmallest": 18197, + "ovich": 18198, + "Ġaccessories": 18199, + "perties": 18200, + "Ġsurplus": 18201, + "ĠMech": 18202, + "Ġambig": 18203, + "ĠImmigration": 18204, + "Ġchim": 18205, + "eval": 18206, + "Ġpracticing": 18207, + "ĠMystery": 18208, + "Ġdomains": 18209, + "ĠSilicon": 18210, + "apps": 18211, + "Ġkilometers": 18212, + "ea": 18213, + "ĠSmash": 18214, + "Ġwarranty": 18215, + "Ġnost": 18216, + "sil": 18217, + "rev": 18218, + "Jon": 18219, + "ĠDublin": 18220, + "Ġtastes": 18221, + "Ġbout": 18222, + "great": 18223, + "error": 18224, + "Ġswitches": 18225, + "ĠBapt": 18226, + "DO": 18227, + 
"oki": 18228, + "Ġsourced": 18229, + "produ": 18230, + "Ġattachment": 18231, + "ĠIssue": 18232, + "ĠQuestion": 18233, + "Join": 18234, + "Ġfitted": 18235, + "Ġunlawful": 18236, + "^^": 18237, + "erek": 18238, + "Ġauthentication": 18239, + "Ġstole": 18240, + "Ġaccountability": 18241, + "label": 18242, + "Search": 18243, + "Ġalbeit": 18244, + "atican": 18245, + "funded": 18246, + "ĠAdding": 18247, + "ĠIQ": 18248, + "Ġsubmar": 18249, + "lit": 18250, + "aque": 18251, + "ĠLearning": 18252, + "Ġinteger": 18253, + "Master": 18254, + "ĠChrom": 18255, + "Ġpremier": 18256, + "Op": 18257, + "ĠLiu": 18258, + "Ġblessed": 18259, + "ĠGlobe": 18260, + "ĠResponse": 18261, + "Ġlegitim": 18262, + "ĠMerkel": 18263, + "Ġdisposal": 18264, + "´": 18265, + "Ġgauge": 18266, + "peat": 18267, + "Ġinduced": 18268, + "Ġquestionable": 18269, + "arthy": 18270, + "ĠVit": 18271, + "ĠFeed": 18272, + "Until": 18273, + "Ut": 18274, + "worthy": 18275, + "RY": 18276, + "ĠHerald": 18277, + "ĠHammer": 18278, + "Ġmedal": 18279, + "ĠRivers": 18280, + "ĠHack": 18281, + "Ġclarify": 18282, + "Ġtracked": 18283, + "Ġautonomous": 18284, + "Ġtenant": 18285, + "ĠQatar": 18286, + "erie": 18287, + "Ġgrim": 18288, + "ĠMonitor": 18289, + "Ġresistant": 18290, + "ĠSpec": 18291, + "ĠWells": 18292, + "NAS": 18293, + "148": 18294, + "Ġminers": 18295, + "iotics": 18296, + "Ġmisses": 18297, + "116": 18298, + "gian": 18299, + "git": 18300, + "ĠEyes": 18301, + "pres": 18302, + "Ġgraduated": 18303, + "Ġangel": 18304, + "Ġsynchron": 18305, + "Ġefficiently": 18306, + "Ġtransmitted": 18307, + "Harry": 18308, + "Ġglobally": 18309, + "ENCE": 18310, + "ĠMontana": 18311, + "raged": 18312, + "ĠPrevention": 18313, + "Ġpiss": 18314, + "ĠLl": 18315, + "Ġshelf": 18316, + "ĠBJP": 18317, + "ĠTestament": 18318, + "ĠLate": 18319, + "iker": 18320, + "ĠHapp": 18321, + "ĠJulian": 18322, + "hall": 18323, + "Ġspont": 18324, + "Ġshutdown": 18325, + "Ġinconsistent": 18326, + "Ġsubscribers": 18327, + "Ġskeleton": 18328, + "ĠNebraska": 18329, + "Ġinspire": 18330, + "ĠVoid": 18331, + "Feed": 18332, + "Ġangles": 18333, + "ĠSprings": 18334, + "Ġbenchmark": 18335, + "Ġvaccines": 18336, + "izophren": 18337, + "sexual": 18338, + "uffed": 18339, + "Ġshine": 18340, + "ĠKath": 18341, + "Ġgesture": 18342, + "inea": 18343, + "Ġrip": 18344, + "Ġoppression": 18345, + "Ġconscience": 18346, + "bt": 18347, + "ĠLum": 18348, + "Ġincidence": 18349, + "ĠFa": 18350, + "wr": 18351, + "Ġmineral": 18352, + "ĠSpurs": 18353, + "alky": 18354, + "Ġthunder": 18355, + "Ġopio": 18356, + "Being": 18357, + "ĠPalm": 18358, + "Ġwasted": 18359, + "Ġlb": 18360, + "iaries": 18361, + "ĠInitiative": 18362, + "Ġcurric": 18363, + "Ġmarker": 18364, + "ĠMcL": 18365, + "Ġextensions": 18366, + "ĠPv": 18367, + "ĠArms": 18368, + "Ġofferings": 18369, + "Ġdefenses": 18370, + "Ġvendor": 18371, + "Ġcontradict": 18372, + "ĠColin": 18373, + "Ġreddit": 18374, + "Ġperipher": 18375, + "122": 18376, + "Ġsins": 18377, + "Edit": 18378, + "ICT": 18379, + "Soft": 18380, + "ĠShah": 18381, + "Ġadministrator": 18382, + "ĠTrip": 18383, + "Ġpornography": 18384, + "Ġtuition": 18385, + "inence": 18386, + "ĠProgress": 18387, + "Ġcatalog": 18388, + "Ġsuite": 18389, + "Ġhike": 18390, + "Ġreproductive": 18391, + "engine": 18392, + "Ġdrought": 18393, + "ĠNoah": 18394, + "Ġ230": 18395, + "Ġdude": 18396, + "Ġrelaxed": 18397, + "Ġpartition": 18398, + "Ġparticipant": 18399, + "Ġtelesc": 18400, + "Ġfeas": 18401, + "ĠFF": 18402, + "owner": 18403, + "Ġsweeping": 18404, + "Ġlenses": 18405, + "Ġmatchup": 18406, + "ĠRepl": 18407, + "ournals": 18408, + 
"Ġcredible": 18409, + "Ġgrandmother": 18410, + "Ġthermal": 18411, + "Ġsubscribing": 18412, + "Ġidentities": 18413, + "colm": 18414, + "UCT": 18415, + "Ġreluctant": 18416, + "users": 18417, + "ĠCort": 18418, + "Ġassisted": 18419, + "OSS": 18420, + "ATIONS": 18421, + "ISH": 18422, + "Ġpharmaceutical": 18423, + "icable": 18424, + "adian": 18425, + "ĠSonic": 18426, + "ĠFury": 18427, + "ĠMong": 18428, + "AH": 18429, + "ĠPsychology": 18430, + "Ġphosph": 18431, + "Ġtreats": 18432, + "ŃĶ": 18433, + "Ġsteadily": 18434, + "ĠHello": 18435, + "Ġrelates": 18436, + "Ġclue": 18437, + "Expl": 18438, + "auth": 18439, + "Ġrevision": 18440, + "Ġeld": 18441, + "osion": 18442, + "Ġbron": 18443, + "144": 18444, + "rikes": 18445, + "Ġmines": 18446, + "Ġblanket": 18447, + "ĠFail": 18448, + "eled": 18449, + "ĠImagine": 18450, + "ĠPlanned": 18451, + "aic": 18452, + "Request": 18453, + "Mad": 18454, + "ĠHorse": 18455, + "ĠEagle": 18456, + "Ġcapac": 18457, + "157": 18458, + "Ġling": 18459, + "ĠNice": 18460, + "ĠParenthood": 18461, + "minster": 18462, + "ogs": 18463, + "ensitive": 18464, + "Nothing": 18465, + "Ġcarn": 18466, + "Fin": 18467, + "ĠPE": 18468, + "Ġrifles": 18469, + "ĠLP": 18470, + "Sand": 18471, + "ĠguiActive": 18472, + "Ġtourist": 18473, + "CNN": 18474, + "Ġunveiled": 18475, + "Ġpredecessor": 18476, + "}{": 18477, + "uber": 18478, + "Ġoffshore": 18479, + "Ġoptical": 18480, + "ĠRot": 18481, + "ĠPearl": 18482, + "eton": 18483, + "Ġstared": 18484, + "Ġfarther": 18485, + "atility": 18486, + "contin": 18487, + "ĠGy": 18488, + "ĠFoster": 18489, + "ĠCoc": 18490, + "rients": 18491, + "Ġdesigning": 18492, + "ĠEconomy": 18493, + "ONG": 18494, + "Women": 18495, + "ĠNancy": 18496, + "erver": 18497, + "Ġmascul": 18498, + "Ġcasualties": 18499, + "Ġ225": 18500, + "ĠSullivan": 18501, + "ĠChoice": 18502, + "Ġaster": 18503, + "ws": 18504, + "Ġhotels": 18505, + "Ġconsiderations": 18506, + "Ġcouch": 18507, + "ĠStrip": 18508, + "ĠGn": 18509, + "Ġmanipulate": 18510, + "lied": 18511, + "Ġsynthetic": 18512, + "Ġassaulted": 18513, + "Ġoffenses": 18514, + "ĠDrake": 18515, + "Ġimpe": 18516, + "October": 18517, + "ĠHeritage": 18518, + "hl": 18519, + "ĠBlair": 18520, + "Unlike": 18521, + "Ġgrief": 18522, + "Ġ450": 18523, + "Ġopted": 18524, + "Ġresignation": 18525, + "ilo": 18526, + "Ġverse": 18527, + "ĠTomb": 18528, + "Ġupt": 18529, + "Ġaired": 18530, + "ĠHook": 18531, + "ĠMLB": 18532, + "Ġassumes": 18533, + "outed": 18534, + "ĠVers": 18535, + "Ġinferior": 18536, + "Ġbundle": 18537, + "ĠDNS": 18538, + "ographer": 18539, + "Ġmultip": 18540, + "ĠSouls": 18541, + "Ġillustrated": 18542, + "Ġtactic": 18543, + "Ġdressing": 18544, + "Ġduo": 18545, + "Conf": 18546, + "Ġrelent": 18547, + "Ġcant": 18548, + "Ġscarce": 18549, + "Ġcandy": 18550, + "ĠCF": 18551, + "Ġaffiliated": 18552, + "Ġsprint": 18553, + "ylan": 18554, + "ĠGarcia": 18555, + "Ġjunk": 18556, + "Print": 18557, + "exec": 18558, + "Crit": 18559, + "Ġportrait": 18560, + "iries": 18561, + "ĠOFF": 18562, + "Ġdisputes": 18563, + "WR": 18564, + "Love": 18565, + "ãģĦ": 18566, + "ĠReyn": 18567, + "Ġhipp": 18568, + "opath": 18569, + "Ġfloors": 18570, + "ĠFeel": 18571, + "Ġworries": 18572, + "Ġsettlements": 18573, + "ĠPos": 18574, + "Ġmosque": 18575, + "Ġfinals": 18576, + "Ġcrushed": 18577, + "ĠProbably": 18578, + "ĠBot": 18579, + "ĠMans": 18580, + "ĠPeriod": 18581, + "Ġsovereignty": 18582, + "Ġseller": 18583, + "Ġapost": 18584, + "Ġamateur": 18585, + "Ġdorm": 18586, + "Ġconsuming": 18587, + "Ġarmour": 18588, + "ĠRoose": 18589, + "Ġintensive": 18590, + "Ġeliminating": 18591, + "ĠSunni": 
18592, + "ĠAleppo": 18593, + "jin": 18594, + "Ġadvise": 18595, + "pal": 18596, + "ĠHalo": 18597, + "Ġdescent": 18598, + "Ġsimpler": 18599, + "Ġbooth": 18600, + "STR": 18601, + "Later": 18602, + "ĠCave": 18603, + "===": 18604, + "Ġmol": 18605, + "Ġfist": 18606, + "Ġshotgun": 18607, + "supp": 18608, + "Ġrobbery": 18609, + "Effect": 18610, + "Ġobscure": 18611, + "ĠProfessional": 18612, + "Ġembassy": 18613, + "Ġmilitant": 18614, + "Ġincarcer": 18615, + "Ġgenerates": 18616, + "Ġlaunches": 18617, + "Ġadministrators": 18618, + "Ġshaft": 18619, + "Ġcircular": 18620, + "Ġfreshman": 18621, + "ĠWes": 18622, + "ĠJoel": 18623, + "ĠDrew": 18624, + "ĠDuncan": 18625, + "ĠApparently": 18626, + "sight": 18627, + "ĠInternal": 18628, + "ĠIndividual": 18629, + "ĠFE": 18630, + "Ġbore": 18631, + "ĠMt": 18632, + "Ġbroadly": 18633, + "ĠOptions": 18634, + "ountain": 18635, + "ipes": 18636, + "ĠVideos": 18637, + "204": 18638, + "Ġhills": 18639, + "Ġsimulation": 18640, + "Ġdisappointment": 18641, + "itan": 18642, + "ĠLaboratory": 18643, + "Ġupward": 18644, + "Ġboundary": 18645, + "Ġdarker": 18646, + "hart": 18647, + "Ġdominance": 18648, + "Cong": 18649, + "ĠOracle": 18650, + "ĠLords": 18651, + "Ġscholarship": 18652, + "ĠVincent": 18653, + "ede": 18654, + "ĠRah": 18655, + "Ġencourages": 18656, + "rov": 18657, + "Ġquo": 18658, + "Ġpremise": 18659, + "ĠCrisis": 18660, + "ĠHolocaust": 18661, + "Ġrhythm": 18662, + "Ġmetric": 18663, + "club": 18664, + "Ġtransported": 18665, + "Ġnod": 18666, + "ĠPist": 18667, + "Ġancestors": 18668, + "ĠFreder": 18669, + "thumbnails": 18670, + "ĠCE": 18671, + "OND": 18672, + "Phil": 18673, + "venge": 18674, + "ĠProducts": 18675, + "castle": 18676, + "Ġqualifying": 18677, + "ĠKaren": 18678, + "VERTISEMENT": 18679, + "Ġmighty": 18680, + "Ġexplanations": 18681, + "Ġfixing": 18682, + "Di": 18683, + "Ġdeclaring": 18684, + "Ġanonymity": 18685, + "Ġjuven": 18686, + "ĠNord": 18687, + "ĠDoom": 18688, + "ĠActually": 18689, + "Ok": 18690, + "phis": 18691, + "ĠDesert": 18692, + "Ġ116": 18693, + "IK": 18694, + "ĠFM": 18695, + "Ġincomes": 18696, + "VEL": 18697, + "okers": 18698, + "Ġpecul": 18699, + "Ġlightweight": 18700, + "gue": 18701, + "Ġaccent": 18702, + "Ġincrement": 18703, + "ĠChan": 18704, + "Ġcomplaining": 18705, + "ĠBaghd": 18706, + "Ġmidfielder": 18707, + "Ġoverhaul": 18708, + "Process": 18709, + "ĠHollow": 18710, + "ĠTitans": 18711, + "Small": 18712, + "manuel": 18713, + "ĠUnity": 18714, + "ĠEvents": 18715, + "Sty": 18716, + "Ġdisproportion": 18717, + "nesty": 18718, + "enes": 18719, + "ĠCod": 18720, + "Ġdemonstrations": 18721, + "ĠCrimson": 18722, + "ĠOH": 18723, + "Ġenrolled": 18724, + "Ġcel": 18725, + "ĠBrett": 18726, + "Ġaide": 18727, + "Ġheels": 18728, + "Ġbroadband": 18729, + "Ġmarking": 18730, + "Ġwizard": 18731, + "ĠNJ": 18732, + "ĠChiefs": 18733, + "Ġingredient": 18734, + "Ġdug": 18735, + "ĠShut": 18736, + "urchase": 18737, + "endor": 18738, + "Ġfarmer": 18739, + "ĠGoldman": 18740, + "129": 18741, + "155": 18742, + "Order": 18743, + "Ġlion": 18744, + "iably": 18745, + "Ġstain": 18746, + "array": 18747, + "ilitary": 18748, + "ĠFAQ": 18749, + "Ġexploded": 18750, + "ĠMcCarthy": 18751, + "ĠTweet": 18752, + "ĠGreens": 18753, + "eking": 18754, + "ln": 18755, + "ensen": 18756, + "Ġmotorcycle": 18757, + "Ġparticle": 18758, + "Ġcholesterol": 18759, + "Bron": 18760, + "Ġstair": 18761, + "Ġoxid": 18762, + "Ġdesirable": 18763, + "ibles": 18764, + "Ġtheor": 18765, + "forcing": 18766, + "Ġpromotional": 18767, + "ovo": 18768, + "boot": 18769, + "ĠBonus": 18770, + "rawling": 18771, + "Ġshortage": 
18772, + "ĠPsy": 18773, + "Ġrecruited": 18774, + "Ġinfants": 18775, + "Ġtestosterone": 18776, + "Ġdeduct": 18777, + "Ġdistinctive": 18778, + "Ġfirmware": 18779, + "built": 18780, + "145": 18781, + "Ġexplored": 18782, + "Ġfactions": 18783, + "Ġvide": 18784, + "Ġtattoo": 18785, + "Ġfinancially": 18786, + "Ġfatigue": 18787, + "Ġproceeding": 18788, + "constitutional": 18789, + "Ġmiser": 18790, + "Ġchairs": 18791, + "gging": 18792, + "ipple": 18793, + "Ġdent": 18794, + "Ġdisreg": 18795, + "çĶ": 18796, + "stant": 18797, + "llo": 18798, + "bps": 18799, + "akening": 18800, + "Ġabnormal": 18801, + "ĠERA": 18802, + "士": 18803, + "ĠHBO": 18804, + "ĠMAR": 18805, + "Ġconcess": 18806, + "Ġservant": 18807, + "Ġaspir": 18808, + "lav": 18809, + "ĠPanel": 18810, + "amo": 18811, + "Ġprecip": 18812, + "Ġrecordings": 18813, + "Ġproceeded": 18814, + "Ġcolony": 18815, + "ĠTang": 18816, + "ablo": 18817, + "Ġstripped": 18818, + "Left": 18819, + "too": 18820, + "Ġpotatoes": 18821, + "Ġfinest": 18822, + "%).": 18823, + "Ġcrap": 18824, + "ĠZach": 18825, + "abases": 18826, + "ĠGoth": 18827, + "Ġbillionaire": 18828, + "wolf": 18829, + "Ġsanction": 18830, + "SK": 18831, + "Ġlogged": 18832, + "Po": 18833, + "eyed": 18834, + "unal": 18835, + "Ġcricket": 18836, + "Ġarmies": 18837, + "Ġuncovered": 18838, + "Cloud": 18839, + "ón": 18840, + "Ġrebounds": 18841, + "Ġmes": 18842, + "Oper": 18843, + "Pac": 18844, + "Ġnationally": 18845, + "Ġinserted": 18846, + "pict": 18847, + "Ġgovernance": 18848, + "и": 18849, + "Ġprivileges": 18850, + "GET": 18851, + "Ġfavorites": 18852, + "imity": 18853, + "Ġlover": 18854, + "them": 18855, + "empl": 18856, + "Ġgorgeous": 18857, + "Ann": 18858, + "Ġslipped": 18859, + "Ġveto": 18860, + "Bob": 18861, + "Ġslim": 18862, + "ucc": 18863, + "ĠFame": 18864, + "uddenly": 18865, + "Ġdenies": 18866, + "ĠMaur": 18867, + "Ġdistances": 18868, + "Ġwanna": 18869, + "tar": 18870, + "ĠSER": 18871, + "ĠâĪ": 18872, + "Ġlemon": 18873, + "athetic": 18874, + "Ġliteral": 18875, + "Ġdistinguished": 18876, + "Ġanswering": 18877, + "GI": 18878, + "Ġreligions": 18879, + "ĠPhilos": 18880, + "ĠLay": 18881, + "Ġcompos": 18882, + "irements": 18883, + "ĠKos": 18884, + "inez": 18885, + "rolling": 18886, + "Ġyoungest": 18887, + "andise": 18888, + "ĠBorn": 18889, + "Ġaltar": 18890, + "amina": 18891, + "ĠBoot": 18892, + "voc": 18893, + "Ġdigging": 18894, + "Ġpressures": 18895, + "Ġlen": 18896, + "264": 18897, + "Ġassassination": 18898, + "ĠBirmingham": 18899, + "ĠMyth": 18900, + "Ġsovereign": 18901, + "ĠArtist": 18902, + "ĠPhotograph": 18903, + "Ġdepicted": 18904, + "Ġdispens": 18905, + "orthy": 18906, + "Ġambul": 18907, + "integ": 18908, + "ĠCele": 18909, + "ĠTibet": 18910, + "Ġhierarchy": 18911, + "Ġcu": 18912, + "Ġpreseason": 18913, + "ĠPeterson": 18914, + "Ġcolours": 18915, + "Ġworrying": 18916, + "Ġbackers": 18917, + "ĠPalmer": 18918, + "Ġμ": 18919, + "Ġcontributor": 18920, + "Ġhearings": 18921, + "Ġurine": 18922, + "ĠÙ": 18923, + "ourgeois": 18924, + "Similar": 18925, + "ĠZimmer": 18926, + "something": 18927, + "ĠUSC": 18928, + "Ġstrengths": 18929, + "ĠFI": 18930, + "Ġlogging": 18931, + "Asked": 18932, + "ĠThai": 18933, + "inqu": 18934, + "ĠWalt": 18935, + "Ġcrews": 18936, + "itism": 18937, + "301": 18938, + "Ġsharply": 18939, + "umed": 18940, + "Ġredirect": 18941, + "rators": 18942, + "Inf": 18943, + "ĠWeapons": 18944, + "Ġteasp": 18945, + "1999": 18946, + "Live": 18947, + "ĠEspecially": 18948, + "ĠSter": 18949, + "ĠVeterans": 18950, + "Ġintro": 18951, + "otherapy": 18952, + "Ġmalware": 18953, + "Ġbreeding": 18954, + 
"Ġmolecular": 18955, + "ĠRoute": 18956, + "ĠComment": 18957, + "ochem": 18958, + "Ġain": 18959, + "Season": 18960, + "Ġlinebacker": 18961, + "Ä«": 18962, + "ĠEconomics": 18963, + "esar": 18964, + "ĠLives": 18965, + "ĠEmma": 18966, + "Ġkin": 18967, + "ĠTerrit": 18968, + "Ġplanted": 18969, + "oton": 18970, + "ĠButter": 18971, + "ĠSpons": 18972, + "PER": 18973, + "Ġdungeon": 18974, + "Ġsymbolic": 18975, + "Ġfilmed": 18976, + "Ġdiets": 18977, + "Ġconcludes": 18978, + "Ġcertainty": 18979, + "ĠFormat": 18980, + "Ġstrangers": 18981, + "format": 18982, + "ĠPhase": 18983, + "Ġcopied": 18984, + "Ġmetres": 18985, + "lda": 18986, + "ĠUsers": 18987, + "Ġdeliberate": 18988, + "Ġwashed": 18989, + "ĠLance": 18990, + "imation": 18991, + "Ġimproper": 18992, + "ĠGenesis": 18993, + "ickr": 18994, + "ĠKush": 18995, + "Ġrealise": 18996, + "Ġembarrassing": 18997, + "alking": 18998, + "bucks": 18999, + "Ġverified": 19000, + "Ġoutline": 19001, + "years": 19002, + "ĠIncome": 19003, + "202": 19004, + "Ġzombies": 19005, + "Final": 19006, + "ĠMillenn": 19007, + "Ġmodifications": 19008, + "ĠVision": 19009, + "ĠMoses": 19010, + "verb": 19011, + "iterranean": 19012, + "ĠJet": 19013, + "Ġnaval": 19014, + "ĠAgg": 19015, + "Ġurl": 19016, + "Ġvictories": 19017, + "Ġnonetheless": 19018, + "Ġinjust": 19019, + "ĠFact": 19020, + "çļ": 19021, + "Ġinsufficient": 19022, + "review": 19023, + "facebook": 19024, + "Ġnegotiating": 19025, + "Ġguarantees": 19026, + "imen": 19027, + "utenberg": 19028, + "Ġgambling": 19029, + "Ġcongr": 19030, + "Loading": 19031, + "Ġnevertheless": 19032, + "Ġpresidents": 19033, + "ĠIndustrial": 19034, + "Ġ118": 19035, + "Ġpoured": 19036, + "ĠTory": 19037, + "Ġ175": 19038, + "Ġ:=": 19039, + "Scott": 19040, + "angered": 19041, + "Tok": 19042, + "Ġorganizers": 19043, + "Mat": 19044, + "ĠGrowth": 19045, + "Ġadul": 19046, + "Ġensures": 19047, + "Ġ117": 19048, + "é¾įå": 19049, + "Ġmassacre": 19050, + "Ġgrades": 19051, + "before": 19052, + "ADVERTISEMENT": 19053, + "ĠSlow": 19054, + "ĠMMA": 19055, + "âĢĶ\"": 19056, + "ĠVatican": 19057, + "Qaeda": 19058, + "Ġowe": 19059, + "6666": 19060, + "ĠSorry": 19061, + "ĠGrass": 19062, + "Ġbackgrounds": 19063, + "Ġexhausted": 19064, + "Ġclan": 19065, + "Ġcompromised": 19066, + "ĠElf": 19067, + "ĠIsaac": 19068, + "enson": 19069, + "Invest": 19070, + "IFA": 19071, + "Ġinterrupted": 19072, + "ãĥīãĥ©": 19073, + "Ġtwisted": 19074, + "ĠDragons": 19075, + "Mode": 19076, + "ĠKremlin": 19077, + "Ġfertil": 19078, + "heres": 19079, + "phan": 19080, + "ĠNode": 19081, + "fed": 19082, + "ĠOrc": 19083, + "Ġunwilling": 19084, + "Cent": 19085, + "Ġpriorit": 19086, + "Ġgraduates": 19087, + "Ġsubjective": 19088, + "Ġissuing": 19089, + "ĠLt": 19090, + "Ġviewer": 19091, + "Ġwoke": 19092, + "Thus": 19093, + "brook": 19094, + "Ġdepressed": 19095, + "Ġbracket": 19096, + "ĠGor": 19097, + "ĠFighting": 19098, + "Ġstriker": 19099, + "Report": 19100, + "ĠPortugal": 19101, + "Ġneo": 19102, + "wed": 19103, + "199": 19104, + "Ġfleeing": 19105, + "shadow": 19106, + "identified": 19107, + "USE": 19108, + "Steam": 19109, + "Ġstretched": 19110, + "Ġrevelations": 19111, + "arted": 19112, + "ĠDw": 19113, + "Ġalignment": 19114, + "eston": 19115, + "ĠJared": 19116, + "Sep": 19117, + "Ġblogs": 19118, + "update": 19119, + "gom": 19120, + "risk": 19121, + "Ġclash": 19122, + "ĠHour": 19123, + "Ġruntime": 19124, + "Ġunwanted": 19125, + "Ġscam": 19126, + "Ġrack": 19127, + "Ġenlight": 19128, + "onest": 19129, + "ĠFerr": 19130, + "Ġconvictions": 19131, + "Ġpiano": 19132, + "Ġcirculation": 19133, + "ĠWelcome": 19134, + 
"Ġbacklash": 19135, + "ĠWade": 19136, + "Ġreceivers": 19137, + "otive": 19138, + "Jeff": 19139, + "Ġnetworking": 19140, + "ĠPrep": 19141, + "ĠExplorer": 19142, + "Ġlecture": 19143, + "Ġuploaded": 19144, + "ĠMeat": 19145, + "BLE": 19146, + "ĠNazis": 19147, + "ĠSynd": 19148, + "stud": 19149, + "roots": 19150, + "rians": 19151, + "Ġportrayed": 19152, + "Ġ??": 19153, + "ĠBuddha": 19154, + "sun": 19155, + "Robert": 19156, + "ĠComplex": 19157, + "Ġoversee": 19158, + "Ġstealth": 19159, + "Title": 19160, + "ĠJobs": 19161, + "ĠKum": 19162, + "Ġappreciation": 19163, + "ĠMOD": 19164, + "Ġbasics": 19165, + "Ġclips": 19166, + "Ġnursing": 19167, + "Ġproposition": 19168, + "Ġrealised": 19169, + "ĠNYC": 19170, + "Ġallocated": 19171, + "rium": 19172, + "aran": 19173, + "ĠProduction": 19174, + "ĠVote": 19175, + "Ġsmugg": 19176, + "Ġhunter": 19177, + "azer": 19178, + "ĠChanges": 19179, + "Ġfluct": 19180, + "yon": 19181, + "Array": 19182, + "Ġkits": 19183, + "Water": 19184, + "Ġuncommon": 19185, + "Ġresting": 19186, + "ells": 19187, + "would": 19188, + "Ġpursued": 19189, + "Ġassertion": 19190, + "ometown": 19191, + "ĠMosul": 19192, + "ĠPlatform": 19193, + "iolet": 19194, + "Ġshareholders": 19195, + "Ġtrails": 19196, + "Pay": 19197, + "ĠEnforcement": 19198, + "types": 19199, + "ĠAnonymous": 19200, + "Ġsatisfying": 19201, + "ilogy": 19202, + "Ġ('": 19203, + "wave": 19204, + "city": 19205, + "Steve": 19206, + "Ġconfrontation": 19207, + "ĠEld": 19208, + "Capt": 19209, + "ahan": 19210, + "htm": 19211, + "ĠCtrl": 19212, + "ONS": 19213, + "230": 19214, + "ifa": 19215, + "holding": 19216, + "Ġdelicate": 19217, + "Ġjaw": 19218, + "ĠGoing": 19219, + "orum": 19220, + "Sal": 19221, + "Ġdull": 19222, + "ĠBeth": 19223, + "Ġprisons": 19224, + "Ġego": 19225, + "ĠElsa": 19226, + "avorite": 19227, + "ĠGang": 19228, + "ĠNuclear": 19229, + "Ġspider": 19230, + "atsu": 19231, + "Ġsampling": 19232, + "Ġabsorbed": 19233, + "ĠPharm": 19234, + "ieth": 19235, + "Ġbucket": 19236, + "ĠRecomm": 19237, + "OF": 19238, + "ĠFactory": 19239, + "ANCE": 19240, + "Ġbacter": 19241, + "Has": 19242, + "ĠObserv": 19243, + "121": 19244, + "Ġpremiere": 19245, + "Develop": 19246, + "Ġcurrencies": 19247, + "Cast": 19248, + "Ġaccompanying": 19249, + "ĠNashville": 19250, + "Ġfatty": 19251, + "ĠBrend": 19252, + "Ġlocks": 19253, + "Ġcentered": 19254, + "ĠUT": 19255, + "aughs": 19256, + "orie": 19257, + "ĠAffordable": 19258, + "vance": 19259, + "DL": 19260, + "emet": 19261, + "Ġthrone": 19262, + "ĠBluetooth": 19263, + "Ġnaming": 19264, + "ifts": 19265, + "ADE": 19266, + "Ġcorrected": 19267, + "Ġpromptly": 19268, + "ĠSTR": 19269, + "Ġgenome": 19270, + "Ġcope": 19271, + "Ġvalley": 19272, + "Ġrounded": 19273, + "ĠKend": 19274, + "alion": 19275, + "pers": 19276, + "Ġtourism": 19277, + "Ġstark": 19278, + "vl": 19279, + "Ġblowing": 19280, + "ĠSchedule": 19281, + "std": 19282, + "Ġunhappy": 19283, + "Ġlitigation": 19284, + "cedes": 19285, + "Ġandroid": 19286, + "Ġintegral": 19287, + "erers": 19288, + "uded": 19289, + "tax": 19290, + "Ġreiter": 19291, + "ĠMotors": 19292, + "ociated": 19293, + "Ġwonders": 19294, + "ĠApost": 19295, + "ucking": 19296, + "ĠRoosevelt": 19297, + "fram": 19298, + "Ġyields": 19299, + "Ġconstitutes": 19300, + "awk": 19301, + "Interest": 19302, + "Ġinterim": 19303, + "Ġbreakthrough": 19304, + "ĠCher": 19305, + "Ġprosec": 19306, + "ĠDj": 19307, + "ĠMT": 19308, + "Resp": 19309, + "ĠPT": 19310, + "Ġsperm": 19311, + "edit": 19312, + "BT": 19313, + "Linux": 19314, + "country": 19315, + "league": 19316, + "Ġdick": 19317, + "Ġoct": 19318, + 
"Ġinserting": 19319, + "Ġscra": 19320, + "ĠBrewing": 19321, + "Ġ1966": 19322, + "Ġrunners": 19323, + "Ġplun": 19324, + "idy": 19325, + "ĠDian": 19326, + "Ġdysfunction": 19327, + "Ġexclusion": 19328, + "Ġdisgr": 19329, + "Ġincorporate": 19330, + "Ġreconc": 19331, + "Ġnominated": 19332, + "ĠArcher": 19333, + "draw": 19334, + "achelor": 19335, + "Ġwritings": 19336, + "Ġshallow": 19337, + "Ġhast": 19338, + "ĠBMW": 19339, + "ĠRS": 19340, + "Ġthigh": 19341, + "Ġ1963": 19342, + "Ġlamb": 19343, + "Ġfavored": 19344, + "agle": 19345, + "Ġcooler": 19346, + "ĠHours": 19347, + "ĠGU": 19348, + "ĠOrigin": 19349, + "Ġglimpse": 19350, + "--------------------": 19351, + "Lim": 19352, + "Ġcheek": 19353, + "Ġjealous": 19354, + "-'": 19355, + "Ġharness": 19356, + "ĠPoison": 19357, + "Ġdisabilities": 19358, + "neapolis": 19359, + "Ġoutlook": 19360, + "Ġnotify": 19361, + "ĠIndianapolis": 19362, + "Ġabrupt": 19363, + "nsic": 19364, + "Ġencrypted": 19365, + "Ġforfe": 19366, + "reath": 19367, + "Ġrabb": 19368, + "Ġfoundations": 19369, + "Ġcompliment": 19370, + "ĠInterview": 19371, + "ĠSwe": 19372, + "Ġadolesc": 19373, + "Ġmonitors": 19374, + "ĠSacramento": 19375, + "Ġtimely": 19376, + "Ġcontempl": 19377, + "Ġpositioned": 19378, + "Ġposters": 19379, + "phies": 19380, + "iovascular": 19381, + "void": 19382, + "ĠFifth": 19383, + "Ġinvestigative": 19384, + "OUN": 19385, + "Ġintegrate": 19386, + "ĠINC": 19387, + "isha": 19388, + "iblings": 19389, + "ĠRequest": 19390, + "ĠRodriguez": 19391, + "Ġslides": 19392, + "ĠDX": 19393, + "Ġfeminism": 19394, + "Ġdatas": 19395, + "Ġbend": 19396, + "irus": 19397, + "ĠNigeria": 19398, + "Fox": 19399, + "Change": 19400, + "Ġairplane": 19401, + "ĠLaden": 19402, + "Ġpublicity": 19403, + "ixty": 19404, + "Ġcommitments": 19405, + "Ġaggregate": 19406, + "Ġdisplaying": 19407, + "ĠArrow": 19408, + "Ġ122": 19409, + "Ġrespects": 19410, + "android": 19411, + "six": 19412, + "ĠSha": 19413, + "Ġrestoration": 19414, + ")\\": 19415, + "WS": 19416, + "oys": 19417, + "Ġillustrate": 19418, + "without": 19419, + "126": 19420, + "ĠâĶĤ": 19421, + "Ġpickup": 19422, + "nels": 19423, + "Ġ....": 19424, + "food": 19425, + "ĠFen": 19426, + ")?": 19427, + "Ġphenomena": 19428, + "Ġcompanions": 19429, + "ĠWrite": 19430, + "Ġspill": 19431, + "Ġbridges": 19432, + "ĠUpdated": 19433, + "ĠFo": 19434, + "Ġinsects": 19435, + "ASHINGTON": 19436, + "Ġscare": 19437, + "iltr": 19438, + "ĠZhang": 19439, + "Ġseverity": 19440, + "Ġindul": 19441, + "149": 19442, + "ĠCoffee": 19443, + "Ġnorms": 19444, + "Ġpulse": 19445, + "ĠFT": 19446, + "Ġhorrific": 19447, + "ĠDestroy": 19448, + "ĠJSON": 19449, + "Ġolive": 19450, + "Ġdiscusses": 19451, + "Rest": 19452, + "Elect": 19453, + "ĠWinn": 19454, + "ĠSurviv": 19455, + "ĠHait": 19456, + "Sure": 19457, + "oped": 19458, + "Ġrooted": 19459, + "ĠSke": 19460, + "ĠBronze": 19461, + "Ġlol": 19462, + "Default": 19463, + "Ġcommodity": 19464, + "redited": 19465, + "Ġlibertarian": 19466, + "Ġforbidden": 19467, + "Ġgran": 19468, + "à¨": 19469, + "Ġlag": 19470, + "enz": 19471, + "drive": 19472, + "Ġmathematics": 19473, + "Ġwires": 19474, + "Ġcritically": 19475, + "Ġcarbohyd": 19476, + "ĠChancellor": 19477, + "ĠEddie": 19478, + "Ġbanning": 19479, + "ĠFri": 19480, + "Ġcomplications": 19481, + "etric": 19482, + "ĠBangladesh": 19483, + "Ġbandwidth": 19484, + "Stop": 19485, + "ĠOriginally": 19486, + "Ġhalfway": 19487, + "ynasty": 19488, + "shine": 19489, + "Ġtales": 19490, + "rities": 19491, + "avier": 19492, + "Ġspinning": 19493, + "ĠWHO": 19494, + "Ġneighbourhood": 19495, + "bach": 19496, + "Ġcommerce": 
19497, + "ĠSle": 19498, + "BU": 19499, + "Ġentrepreneur": 19500, + "Ġpeculiar": 19501, + "ĠComments": 19502, + "fre": 19503, + "320": 19504, + "ICS": 19505, + "Ġimagery": 19506, + "ĠCanon": 19507, + "ĠElectronic": 19508, + "short": 19509, + "((": 19510, + "Dig": 19511, + "Ġcommem": 19512, + "uced": 19513, + "Ġinclined": 19514, + "ĠSummon": 19515, + "Ġcliff": 19516, + "ĠMediterranean": 19517, + "Ġpoetry": 19518, + "Ġprosperity": 19519, + "ĠRece": 19520, + "Ġpills": 19521, + "member": 19522, + "Ġfinale": 19523, + "unc": 19524, + "ĠGig": 19525, + "ä½": 19526, + "Ġlod": 19527, + "Ġbackward": 19528, + "-+": 19529, + "ĠForward": 19530, + "Ġthri": 19531, + "sure": 19532, + "Ġsoap": 19533, + "ĠFX": 19534, + "RES": 19535, + "ĠSexual": 19536, + "oulos": 19537, + "Ġfoolish": 19538, + "Ġrighteous": 19539, + "Ġcoff": 19540, + "terrorism": 19541, + "ustain": 19542, + "oter": 19543, + "Ġabuses": 19544, + "next": 19545, + "Ġabusive": 19546, + "Ġthereafter": 19547, + "Ġprohibition": 19548, + "ĠSUP": 19549, + "Ġdip": 19550, + "Ġripped": 19551, + "Ġinherited": 19552, + "Ġbats": 19553, + "stru": 19554, + "GT": 19555, + "Ġflawed": 19556, + "phabet": 19557, + "Ġfog": 19558, + "doors": 19559, + "Ġimaging": 19560, + "Ġdigits": 19561, + "ĠHungary": 19562, + "Ġarrog": 19563, + "Ġteachings": 19564, + "Ġprotocols": 19565, + "ĠBanks": 19566, + "à¸": 19567, + "pound": 19568, + "ĠCurt": 19569, + ".\")": 19570, + "./": 19571, + "Ġexemption": 19572, + "endix": 19573, + "ĠMull": 19574, + "Ġimproves": 19575, + "ĠGamer": 19576, + "dimensional": 19577, + "Icon": 19578, + "ĠMargaret": 19579, + "Status": 19580, + "dates": 19581, + "Ġintends": 19582, + "Ġdepict": 19583, + "Ġparked": 19584, + "Joe": 19585, + "ĠMarines": 19586, + "chnology": 19587, + "!).": 19588, + "Ġjudged": 19589, + "Ġweights": 19590, + "Ray": 19591, + "Ġapartments": 19592, + "hester": 19593, + "Ġreinforce": 19594, + "Ġoffender": 19595, + "occup": 19596, + "Ġsore": 19597, + "ept": 19598, + "ĠPHP": 19599, + "ĠBrow": 19600, + "Ġauthorization": 19601, + "ĠRisk": 19602, + "ĠDelaware": 19603, + "ĠQU": 19604, + "Ġnotifications": 19605, + "Ġsunlight": 19606, + "Ġexclude": 19607, + "dat": 19608, + "Ġmesh": 19609, + "ĠSudan": 19610, + "Ġbelonged": 19611, + "Ġsubway": 19612, + "Ġnoon": 19613, + "ĠInterior": 19614, + "olics": 19615, + "ĠLakers": 19616, + "Ġcoding": 19617, + "Disclaimer": 19618, + "Calif": 19619, + "Old": 19620, + "Ġdisl": 19621, + "?????": 19622, + "Ġconfirms": 19623, + "Ġrecruitment": 19624, + "Ġhomicide": 19625, + "Consider": 19626, + "ĠJeffrey": 19627, + "fty": 19628, + "};": 19629, + "Ġobjection": 19630, + "doing": 19631, + "ĠLeo": 19632, + "Want": 19633, + "Ġglow": 19634, + "ĠClarke": 19635, + "ĠNorman": 19636, + "Ġverification": 19637, + "Ġpacket": 19638, + "ĠFormula": 19639, + "Ġplag": 19640, + "esville": 19641, + "Ġshouting": 19642, + "Ġov": 19643, + "ĠREC": 19644, + "ĠBub": 19645, + "Ġninth": 19646, + "Ġenerg": 19647, + "Ġvalidity": 19648, + "Ġups": 19649, + "jack": 19650, + "Ġneighboring": 19651, + "ĠNec": 19652, + "eworks": 19653, + "ĠHab": 19654, + "arez": 19655, + "Ġspine": 19656, + "Ġeventual": 19657, + "ĠLeaders": 19658, + "ĠCarn": 19659, + "Ġprobation": 19660, + "Ġromance": 19661, + "msg": 19662, + "ĠMechanical": 19663, + "ERY": 19664, + "Rock": 19665, + "Ġpartisan": 19666, + "Node": 19667, + "assets": 19668, + "minent": 19669, + "Ġforeigners": 19670, + "Ġtestify": 19671, + "ĠUsually": 19672, + "lords": 19673, + "ĠGren": 19674, + "ĠPowell": 19675, + "BIL": 19676, + "Ġsr": 19677, + "Ġaddict": 19678, + "Ġshells": 19679, + "Ġsigh": 19680, + 
"ĠYale": 19681, + "ternity": 19682, + "Ġ750": 19683, + "EU": 19684, + "ĠRifle": 19685, + "Ġpatron": 19686, + "ema": 19687, + "ĠBannon": 19688, + "anity": 19689, + "Ġtropical": 19690, + "ĠVII": 19691, + "cross": 19692, + "Everything": 19693, + "ĠISO": 19694, + "Ġhumble": 19695, + "assing": 19696, + "ĠFIG": 19697, + "Ġupdating": 19698, + "yson": 19699, + "Ġcalcium": 19700, + "Ġcompetent": 19701, + "Ġsteering": 19702, + "Prot": 19703, + "ĠSY": 19704, + "ĠFinals": 19705, + "ĠRug": 19706, + "159": 19707, + "137": 19708, + "ĠGolf": 19709, + "Ġ126": 19710, + "Ġaccommodation": 19711, + "ĠHughes": 19712, + "Ġaesthetic": 19713, + "artisan": 19714, + "ĠTwilight": 19715, + "Ġprince": 19716, + "ĠAgriculture": 19717, + "ĠDisco": 19718, + "Ġprecedent": 19719, + "Ġtyping": 19720, + "authorized": 19721, + "Option": 19722, + "ĠAub": 19723, + "lishes": 19724, + "acht": 19725, + "mag": 19726, + "Peter": 19727, + "ĠUFO": 19728, + "monton": 19729, + "ĠLith": 19730, + "Ġarom": 19731, + "Ġsecuring": 19732, + "Ġconfined": 19733, + "private": 19734, + "Ġswords": 19735, + "Ġmarkers": 19736, + "Ġmetabolic": 19737, + "select": 19738, + "ĠCurse": 19739, + "ĠOt": 19740, + "gressive": 19741, + "Ġincumb": 19742, + "ĠSaga": 19743, + "Ġpriced": 19744, + "Ġclearance": 19745, + "Content": 19746, + "Ġdrilling": 19747, + "Ġnotices": 19748, + "Ġbourgeois": 19749, + "Ġvest": 19750, + "Ġcookie": 19751, + "ĠGuardians": 19752, + "rys": 19753, + "inyl": 19754, + "Ġ124": 19755, + "Ġplausible": 19756, + "ongh": 19757, + "ĠOdin": 19758, + "Ġconception": 19759, + "ĠYuk": 19760, + "ĠBaghdad": 19761, + "ĠFlag": 19762, + "Austral": 19763, + "ĠIBM": 19764, + "Ġinternationally": 19765, + "ĠWikiLeaks": 19766, + "IED": 19767, + "Ġcyn": 19768, + "Ġchooses": 19769, + "ĠPill": 19770, + "Ġcombining": 19771, + "Ġradi": 19772, + "ĠMohammed": 19773, + "defense": 19774, + "atching": 19775, + "Subject": 19776, + "iciency": 19777, + "Frame": 19778, + "Ġ{\"": 19779, + "Ġchess": 19780, + "Ġtimer": 19781, + "190": 19782, + "Ġtin": 19783, + "Ġordinance": 19784, + "emetery": 19785, + "Ġaccusing": 19786, + "Ġnoticeable": 19787, + "Ġcentres": 19788, + "Ġlid": 19789, + "ĠMills": 19790, + "imgur": 19791, + "Ġzoom": 19792, + "ergic": 19793, + "Ġcompression": 19794, + "prim": 19795, + "find": 19796, + "Ġsurg": 19797, + "Ġpand": 19798, + "ĠKee": 19799, + "ĠChad": 19800, + "cellence": 19801, + "oyle": 19802, + "Ġsocialism": 19803, + "ĠTravis": 19804, + "ĠMHz": 19805, + "Ġguild": 19806, + "ALLY": 19807, + "ĠSubscribe": 19808, + "ĠRelated": 19809, + "Ġoccurrence": 19810, + "itching": 19811, + "Ġfictional": 19812, + "Ġcrush": 19813, + "ĠEA": 19814, + "cod": 19815, + "mix": 19816, + "ĠTriple": 19817, + "Ġretrieve": 19818, + "Ġstimulus": 19819, + "Ġpsychiat": 19820, + "ĠDoor": 19821, + "Ġhomosexuality": 19822, + "Ġelementary": 19823, + "Ġcellular": 19824, + "idian": 19825, + "ĠLaun": 19826, + "Ġintriguing": 19827, + "Ġfoam": 19828, + "ĠBass": 19829, + "idi": 19830, + "itsu": 19831, + "Ġassure": 19832, + "Ġcongrat": 19833, + "Ġbusinessman": 19834, + "ĠBoost": 19835, + "close": 19836, + "Ġlied": 19837, + "Ġsciences": 19838, + "ĠOmega": 19839, + "ĠGraphics": 19840, + "Ġ<=": 19841, + "spoken": 19842, + "Ġconnectivity": 19843, + "Saturday": 19844, + "ĠAvengers": 19845, + "Ġtoggle": 19846, + "Ġankle": 19847, + "Ġnationalist": 19848, + "model": 19849, + "ĠPool": 19850, + "ophobia": 19851, + "Var": 19852, + "ĠMons": 19853, + "atories": 19854, + "Ġaggressively": 19855, + "Clear": 19856, + "Forge": 19857, + "acters": 19858, + "Ġhedge": 19859, + "Ġpipes": 19860, + "Ġblunt": 19861, + 
"Ġsq": 19862, + "Ġremotely": 19863, + "Wed": 19864, + "asers": 19865, + "Ġrefriger": 19866, + "Ġtiles": 19867, + "Ġrescued": 19868, + "Ġcomprised": 19869, + "insky": 19870, + "Ġmanif": 19871, + "avanaugh": 19872, + "Ġprolifer": 19873, + "Ġaligned": 19874, + "xml": 19875, + "Ġtriv": 19876, + "Ġcoordination": 19877, + "ĠPER": 19878, + "ĠQuote": 19879, + "134": 19880, + "bf": 19881, + "ĠSaw": 19882, + "Ġtermination": 19883, + "Ġ190": 19884, + "Ġadditions": 19885, + "Ġtrio": 19886, + "Ġprojections": 19887, + "Ġpositively": 19888, + "Ġinclusive": 19889, + "Ġmembr": 19890, + "1990": 19891, + "older": 19892, + "Ġpracticed": 19893, + "inkle": 19894, + "Arch": 19895, + "Ġstarters": 19896, + "arius": 19897, + "Ġintermediate": 19898, + "ĠBenef": 19899, + "ĠKiller": 19900, + "Ġinterventions": 19901, + "ĠKil": 19902, + "ĠFlying": 19903, + "Inv": 19904, + "Ġpremature": 19905, + "Ġpsychiatric": 19906, + "Ġindie": 19907, + "Ġcollar": 19908, + "ĠRainbow": 19909, + "afi": 19910, + "Ġdisruption": 19911, + "ĠFOX": 19912, + "casting": 19913, + "Ġmisdem": 19914, + "cro": 19915, + "Ġwipe": 19916, + "ardon": 19917, + "Ġbast": 19918, + "ĠTommy": 19919, + "ĠRepresentative": 19920, + "Ġbelly": 19921, + "ĠPO": 19922, + "ĠBreitbart": 19923, + "132": 19924, + "Ġmessaging": 19925, + "Should": 19926, + "References": 19927, + "ĠGRE": 19928, + "istical": 19929, + "LP": 19930, + "ĠCav": 19931, + "ĠCrazy": 19932, + "Ġintuitive": 19933, + "keeping": 19934, + "ĠMoss": 19935, + "Ġdiscontin": 19936, + "ĠModule": 19937, + "Ġunrelated": 19938, + "ĠPractice": 19939, + "ĠTransport": 19940, + "Ġstatistically": 19941, + "orns": 19942, + "Ġsized": 19943, + "pu": 19944, + "Ġcaf": 19945, + "ĠWorlds": 19946, + "ĠRodgers": 19947, + "ĠLun": 19948, + "ĠComic": 19949, + "living": 19950, + "Ġcared": 19951, + "Ġclimbed": 19952, + "){": 19953, + "Ġconsisted": 19954, + "Ġmedieval": 19955, + "folk": 19956, + "Ġhacked": 19957, + "Ġdire": 19958, + "ĠHermione": 19959, + "Ġtended": 19960, + "ceans": 19961, + "Daniel": 19962, + "went": 19963, + "Ġlegislators": 19964, + "Ġredes": 19965, + "games": 19966, + "Ġgn": 19967, + "amiliar": 19968, + "Ġ++": 19969, + "ggy": 19970, + "threat": 19971, + "Ġmagnet": 19972, + "Ġperceive": 19973, + "Ġzip": 19974, + "Ġindictment": 19975, + "Ġcritique": 19976, + "gard": 19977, + "ĠSafe": 19978, + "ĠCream": 19979, + "Ġadvent": 19980, + "oba": 19981, + "Ġvowed": 19982, + "ousands": 19983, + "Ġski": 19984, + "Ġabortions": 19985, + "uart": 19986, + "Ġstunned": 19987, + "Ġadvancing": 19988, + "Ġlacked": 19989, + "Ġ\\\"": 19990, + "Ġschizophren": 19991, + "Ġelegant": 19992, + "Ġconferences": 19993, + "Ġcanceled": 19994, + "ĠHudson": 19995, + "ĠHopefully": 19996, + "Ġtrump": 19997, + "Ġfrequencies": 19998, + "Ġmeteor": 19999, + "ĠJunior": 20000, + "ĠFleet": 20001, + "ĠMalcolm": 20002, + "ĠTools": 20003, + "Ġ........": 20004, + "Ġhobby": 20005, + "ĠEuropeans": 20006, + "Ġ1500": 20007, + "ĠInto": 20008, + "Ġsway": 20009, + "ĠAppro": 20010, + "ĠCompl": 20011, + "Community": 20012, + "Ġtide": 20013, + "ĠSummit": 20014, + "ä»": 20015, + "Ġintervals": 20016, + "ĠEther": 20017, + "Ġhabitat": 20018, + "ĠStevens": 20019, + "lishing": 20020, + "ĠDomain": 20021, + "Ġtriggers": 20022, + "Ġchasing": 20023, + "Ġcharm": 20024, + "ĠFlower": 20025, + "itored": 20026, + "Ġblessing": 20027, + "Ġtextures": 20028, + "Five": 20029, + "Ġliquor": 20030, + "RP": 20031, + "FIN": 20032, + "Ġ1962": 20033, + "CAR": 20034, + "Unknown": 20035, + "Ġresil": 20036, + "ĠLily": 20037, + "Ġabundance": 20038, + "Ġpredictable": 20039, + "rar": 20040, + "Ġbullshit": 
20041, + "leen": 20042, + "chet": 20043, + "Mor": 20044, + "Much": 20045, + "ä¹": 20046, + "Ġemphasized": 20047, + "Ġcrust": 20048, + "Ġprimitive": 20049, + "Ġenjoyable": 20050, + "ĠPictures": 20051, + "Ġteammate": 20052, + "pler": 20053, + "ĠTol": 20054, + "ĠKane": 20055, + "Ġsummoned": 20056, + "thy": 20057, + "rama": 20058, + "ĠHonda": 20059, + "Ġrealizing": 20060, + "Ġquicker": 20061, + "Ġconcentrate": 20062, + "clear": 20063, + "Ġ210": 20064, + "ĠErdogan": 20065, + "aris": 20066, + "Ġresponds": 20067, + "ĠBI": 20068, + "Ġeligibility": 20069, + "Ġpushes": 20070, + "ĠIdaho": 20071, + "Ġaggrav": 20072, + "Ġruins": 20073, + "urations": 20074, + "Ġbans": 20075, + "Ġanat": 20076, + "share": 20077, + "Ġgrind": 20078, + "hin": 20079, + "umen": 20080, + "Ġutilities": 20081, + "ĠYankees": 20082, + "Ġdatabases": 20083, + "ĠDD": 20084, + "Ġdisplaced": 20085, + "Ġdependencies": 20086, + "Ġstimulation": 20087, + "hun": 20088, + "houses": 20089, + "ĠPretty": 20090, + "ĠRavens": 20091, + "ĠTODAY": 20092, + "Ġassociates": 20093, + "Ġtherape": 20094, + "cled": 20095, + "Ġdeer": 20096, + "Ġrepairs": 20097, + "rentice": 20098, + "Ġreceptors": 20099, + "Ġremed": 20100, + "ĠCe": 20101, + "Ġmarriages": 20102, + "Ġballots": 20103, + "ĠSoldier": 20104, + "Ġhilarious": 20105, + "opl": 20106, + "138": 20107, + "Ġinherently": 20108, + "Ġignorant": 20109, + "Ġbounce": 20110, + "ĠEaster": 20111, + "RELATED": 20112, + "ĠCurrency": 20113, + "EV": 20114, + "ãĥŀ": 20115, + "ĠLead": 20116, + "Ġdeceased": 20117, + "Brien": 20118, + "ĠMusk": 20119, + "JS": 20120, + "Ġmerge": 20121, + "hearted": 20122, + "creat": 20123, + "mitt": 20124, + "mund": 20125, + "ĠâĢĭ": 20126, + "ĠBag": 20127, + "Ġprojection": 20128, + "Ġjava": 20129, + "ĠStandards": 20130, + "ĠLeonard": 20131, + "Ġcoconut": 20132, + "ĠPopulation": 20133, + "Ġtraject": 20134, + "Ġimply": 20135, + "Ġcuriosity": 20136, + "ĠDB": 20137, + "ĠFresh": 20138, + "ĠPor": 20139, + "Ġheavier": 20140, + "neys": 20141, + "gomery": 20142, + "Ġdeserved": 20143, + "Ġphrases": 20144, + "ĠGC": 20145, + "Ġyeast": 20146, + "desc": 20147, + "Death": 20148, + "Ġreboot": 20149, + "Ġmetadata": 20150, + "ICAL": 20151, + "Ġrepay": 20152, + "ĠIndependence": 20153, + "Ġsuburban": 20154, + "icals": 20155, + "Ġatop": 20156, + "Ġallocation": 20157, + "generation": 20158, + "ĠGram": 20159, + "Ġmoisture": 20160, + "Ġpine": 20161, + "ĠLiberals": 20162, + "Ġaides": 20163, + "Ġunderest": 20164, + "ĠBerry": 20165, + "Ġceremon": 20166, + "370": 20167, + "astrous": 20168, + "ĠPirates": 20169, + "Ġtense": 20170, + "ĠIndustries": 20171, + "ĠAppeals": 20172, + "ĠNear": 20173, + "Ġè£ıç": 20174, + "Ġlovers": 20175, + "ĠCAP": 20176, + "ĠCraw": 20177, + "Ġgiants": 20178, + "Ġefficacy": 20179, + "Element": 20180, + "ĠBehavior": 20181, + "ĠToyota": 20182, + "Ġintest": 20183, + "Priv": 20184, + "AI": 20185, + "Ġmaneuver": 20186, + "Ġperfection": 20187, + "Ġbang": 20188, + "paper": 20189, + "rill": 20190, + "George": 20191, + "border": 20192, + "inters": 20193, + "ĠSeth": 20194, + "Ġclues": 20195, + "ĠLevi": 20196, + "ĠRevenue": 20197, + "147": 20198, + "Ġvapor": 20199, + "Ġfortunate": 20200, + "Ġthreatens": 20201, + "Ġvet": 20202, + "Ġdependency": 20203, + "ersed": 20204, + "article": 20205, + "ĠBlizzard": 20206, + "Ġchlor": 20207, + "Ġminus": 20208, + "ĠBills": 20209, + "Ġcryptocurrency": 20210, + "Ġmetabolism": 20211, + "tering": 20212, + "Ġpestic": 20213, + "steps": 20214, + "ĠTreasure": 20215, + "racted": 20216, + "ĠConstant": 20217, + "Ġtemp": 20218, + "139": 20219, + "ĠDetective": 20220, + "urally": 
20221, + "Ġrecovering": 20222, + "Ġcortex": 20223, + "Ġ144": 20224, + "closed": 20225, + "Ġprejudice": 20226, + "aunted": 20227, + "Ġstorms": 20228, + "ĠNOW": 20229, + "Ġmachinery": 20230, + "Address": 20231, + "Ġcompelled": 20232, + "270": 20233, + "Ġdespair": 20234, + "bane": 20235, + "Ġvegetable": 20236, + "Ġbeds": 20237, + "Learn": 20238, + "Ġcolorful": 20239, + "Ġspike": 20240, + "Ġmargins": 20241, + "Ġsympathy": 20242, + "Ġworkshop": 20243, + "ĠCBC": 20244, + "Sat": 20245, + "Ġburns": 20246, + "ĠGender": 20247, + "Ġ129": 20248, + "ĠCable": 20249, + "Ġdebts": 20250, + "ĠTheresa": 20251, + "Ġreflecting": 20252, + "Ġairst": 20253, + "Ġrim": 20254, + "ramid": 20255, + "Ġweaknesses": 20256, + "Writ": 20257, + "oggle": 20258, + "ti": 20259, + "ĠCharge": 20260, + "Ġweighed": 20261, + "Ġ(.": 20262, + "Ġlaughter": 20263, + "Ġrouter": 20264, + "ĠDemocracy": 20265, + "Dear": 20266, + "Ġhasht": 20267, + "Ġdy": 20268, + "Ġhints": 20269, + "running": 20270, + "Ġfinishes": 20271, + "arus": 20272, + "Mass": 20273, + "result": 20274, + "ascus": 20275, + "Ġvintage": 20276, + "Ġconqu": 20277, + "Ġwildly": 20278, + "acist": 20279, + "Ġlingu": 20280, + "Ġprotagonist": 20281, + "strom": 20282, + "teenth": 20283, + "ĠSolo": 20284, + "mac": 20285, + "filled": 20286, + "Ġrenown": 20287, + "itives": 20288, + "Ġmotive": 20289, + "ĠAntar": 20290, + "ĠMann": 20291, + "ĠAdjust": 20292, + "Ġrockets": 20293, + "Ġtroubling": 20294, + "ei": 20295, + "Ġorganisms": 20296, + "assis": 20297, + "Christian": 20298, + "Ġ145": 20299, + "ĠHass": 20300, + "Ġswall": 20301, + "Ġwax": 20302, + "ĠSurvival": 20303, + "VS": 20304, + "ĠMurd": 20305, + "vd": 20306, + "standard": 20307, + "Ġdragons": 20308, + "Ġacceleration": 20309, + "rational": 20310, + "final": 20311, + "Ġpaired": 20312, + "ĠEthereum": 20313, + "Ġinterfaces": 20314, + "Ġresent": 20315, + "Ġartifacts": 20316, + "Å«": 20317, + "arel": 20318, + "Ġcompetitor": 20319, + "ĠNicholas": 20320, + "ĠSurface": 20321, + "cpp": 20322, + "ĠTot": 20323, + "Ġeconomically": 20324, + "Ġorganised": 20325, + "Ġenforced": 20326, + "inho": 20327, + "Ġvarieties": 20328, + "Ġabdom": 20329, + "ĠBailey": 20330, + "idav": 20331, + "ĠSalv": 20332, + "paid": 20333, + "Ġaltitude": 20334, + "essert": 20335, + "ĠGutenberg": 20336, + "area": 20337, + "opoulos": 20338, + "Ġprofessors": 20339, + "iggs": 20340, + "ĠFate": 20341, + "hey": 20342, + "Ġ3000": 20343, + "Dist": 20344, + "Ġtwins": 20345, + "cill": 20346, + "ĠMaps": 20347, + "Ġtraps": 20348, + "Ġweed": 20349, + "ĠKiss": 20350, + "Ġyoga": 20351, + "Ġrecipients": 20352, + "ĠWestminster": 20353, + "Ġpools": 20354, + "ĠWalmart": 20355, + "188": 20356, + "ĠSchools": 20357, + "attack": 20358, + "ĠARM": 20359, + "paragraph": 20360, + "Warning": 20361, + "jl": 20362, + "Ġselfish": 20363, + "anchez": 20364, + "ĠHeights": 20365, + "Fre": 20366, + "ĠSoph": 20367, + "Ġ--------------------------------": 20368, + "tml": 20369, + "333": 20370, + "Ġraids": 20371, + "Ġsatellites": 20372, + "KEY": 20373, + "Ġlasts": 20374, + "ÑĤ": 20375, + "Ins": 20376, + "ĠDame": 20377, + "Ġunpredict": 20378, + "///": 20379, + "ghai": 20380, + "Ġartillery": 20381, + "Ġcruise": 20382, + "Ġgel": 20383, + "ĠCabinet": 20384, + "Ġblows": 20385, + "ĠEsp": 20386, + "Ġproximity": 20387, + "othe": 20388, + "ĠSkills": 20389, + "ĠUpper": 20390, + "obo": 20391, + "ĠNDP": 20392, + "Ġenjoys": 20393, + "Ġrepeating": 20394, + "ĠConstruction": 20395, + "ĠQuestions": 20396, + "Hillary": 20397, + "Ġuint": 20398, + "Ġprocessors": 20399, + "ĠGibson": 20400, + "ĠMultiple": 20401, + "qa": 20402, + 
"ĠBom": 20403, + "ĠMiles": 20404, + "ventional": 20405, + "Ġhurts": 20406, + "skin": 20407, + "ĠAIDS": 20408, + "Ġadvisers": 20409, + "ĠRoot": 20410, + "Ġmethodology": 20411, + "ĠDale": 20412, + "Ġdeton": 20413, + "ĠKnowledge": 20414, + "sequently": 20415, + "Ġ121": 20416, + "Ġconnects": 20417, + "Cy": 20418, + "ĠDanger": 20419, + "Ġcontributors": 20420, + "ĠBent": 20421, + "Ġbrass": 20422, + "ĠGuns": 20423, + "into": 20424, + "ĠFortune": 20425, + "Ġbroker": 20426, + "balance": 20427, + "Ġlengths": 20428, + "Ġvic": 20429, + "Ġaveraging": 20430, + "Ġappropriately": 20431, + "ĠCamera": 20432, + "Ġsandwich": 20433, + "ĠCDC": 20434, + "Ġcoordinate": 20435, + "Ġnavig": 20436, + "Ġgoodness": 20437, + "laim": 20438, + "Ġbrake": 20439, + "Ġextremist": 20440, + "ĠWake": 20441, + "ĠMend": 20442, + "ĠTiny": 20443, + "ĠCOL": 20444, + "ĠRF": 20445, + "ĠDual": 20446, + "ĠWine": 20447, + "Case": 20448, + "Ġrefined": 20449, + "Ġlamp": 20450, + "Lead": 20451, + "Ġbapt": 20452, + "ĠCarb": 20453, + "ĠSadd": 20454, + "ĠMinneapolis": 20455, + "PDF": 20456, + "Early": 20457, + "ĠHidden": 20458, + "Its": 20459, + "ĠTIME": 20460, + "Ġpap": 20461, + "Ġcommissioned": 20462, + "ĠFew": 20463, + "ĠColts": 20464, + "ĠBren": 20465, + "Ġbothered": 20466, + "Ġlikewise": 20467, + "Exper": 20468, + "ĠSchw": 20469, + "cry": 20470, + "nn": 20471, + "ĠMitch": 20472, + "imon": 20473, + "MG": 20474, + "bm": 20475, + "UMP": 20476, + "rays": 20477, + "Ġregistry": 20478, + "Ġ270": 20479, + "achine": 20480, + "rella": 20481, + "anting": 20482, + "00000": 20483, + "Ġruined": 20484, + "spot": 20485, + "Ġta": 20486, + "Ġmaximize": 20487, + "Ġinconven": 20488, + "Dead": 20489, + "Human": 20490, + "Enabled": 20491, + "ĠMarie": 20492, + "Ġchill": 20493, + "ĠParadise": 20494, + "Ġstarring": 20495, + "ĠLatino": 20496, + "ĠProtocol": 20497, + "ĠEVER": 20498, + "Ġsuppliers": 20499, + "message": 20500, + "ĠBrock": 20501, + "Ġserum": 20502, + "âĸĪâĸĪâĸĪâĸĪ": 20503, + "Ġencomp": 20504, + "Ġambition": 20505, + "uese": 20506, + "Ġarrows": 20507, + "Andrew": 20508, + "Ġantenna": 20509, + "Ġ1961": 20510, + "ĠBark": 20511, + "Ġbool": 20512, + "ãĤª": 20513, + "ĠStorage": 20514, + "Ġrailway": 20515, + "Ġtougher": 20516, + "ĠCad": 20517, + "Ġwashing": 20518, + "Py": 20519, + "']": 20520, + "embed": 20521, + "ĠMemphis": 20522, + "ackle": 20523, + "Ġfamously": 20524, + "ĠFortunately": 20525, + "ovies": 20526, + "Ġmindset": 20527, + "Ġsneak": 20528, + "ĠDh": 20529, + "RAW": 20530, + "ĠSimpson": 20531, + "Ġlivest": 20532, + "Ġlandmark": 20533, + "Ġcement": 20534, + "Low": 20535, + "Ġthrilled": 20536, + "ĠCourse": 20537, + "inel": 20538, + "Ġchuck": 20539, + "idate": 20540, + "global": 20541, + "Ġwhit": 20542, + "Ġ�": 20543, + "adays": 20544, + "ski": 20545, + "ĠSV": 20546, + "Ġviruses": 20547, + "306": 20548, + "ĠRespons": 20549, + "Ġtheaters": 20550, + "ĠBranch": 20551, + "ĠGeneva": 20552, + "ĠMK": 20553, + "Ġunbeliev": 20554, + "Ġcommunist": 20555, + "Original": 20556, + "ĠReceived": 20557, + "ĠTransfer": 20558, + "ĠArg": 20559, + "Input": 20560, + "ĠStrategy": 20561, + "Ġpalace": 20562, + "thening": 20563, + "Dri": 20564, + "Ġsentencing": 20565, + "umbnail": 20566, + "Ġpins": 20567, + "recy": 20568, + "Ġsiblings": 20569, + "Getting": 20570, + "ĠBU": 20571, + "ĠNorthwest": 20572, + "Ġprolonged": 20573, + "ĠSakura": 20574, + "Comb": 20575, + "ĠBour": 20576, + "Ġinadequate": 20577, + "ĠKash": 20578, + "Ġusername": 20579, + "ĠImprove": 20580, + "Ġbattling": 20581, + "ĠMAC": 20582, + "Ġcurriculum": 20583, + "Ġsoda": 20584, + "ĠCannon": 20585, + "Ġsensible": 
20586, + "spons": 20587, + "December": 20588, + "Ġwicked": 20589, + "ĠPengu": 20590, + "Ġdictators": 20591, + "ĠHearts": 20592, + "ogyn": 20593, + "Ġsimilarities": 20594, + "ĠStats": 20595, + "Ġhollow": 20596, + "itations": 20597, + "\":[": 20598, + "Ġhover": 20599, + "ĠListen": 20600, + "sch": 20601, + "Sund": 20602, + "Ġcad": 20603, + "ĠParks": 20604, + "Ġlur": 20605, + "Ġhype": 20606, + "ĠLem": 20607, + "NAME": 20608, + "isure": 20609, + "Friday": 20610, + "Ġshoots": 20611, + "Ġcloses": 20612, + "Ġdb": 20613, + "ĠRidge": 20614, + "ĠDifferent": 20615, + "Ġreplies": 20616, + "ĠBroadway": 20617, + "opers": 20618, + "Ġintoler": 20619, + "ĠZeus": 20620, + "akespe": 20621, + "Ġproprietary": 20622, + "Ġrequesting": 20623, + "Ġcontrollers": 20624, + "ĠMIN": 20625, + "imedia": 20626, + "becca": 20627, + "Ġexpans": 20628, + "Ġoils": 20629, + "Bot": 20630, + "ĠChand": 20631, + "Ġprinter": 20632, + "Ġtopped": 20633, + "ĠPOL": 20634, + "ĠEarlier": 20635, + "Social": 20636, + "avin": 20637, + "Ġdecreases": 20638, + "ĠSeb": 20639, + "Ġspecifications": 20640, + "ĠBlast": 20641, + "ĠKurt": 20642, + "Ġfreel": 20643, + "Brown": 20644, + "Ġdilig": 20645, + "roe": 20646, + "ĠProblem": 20647, + "ĠQuad": 20648, + "Ġdecentral": 20649, + "ĠVector": 20650, + "anut": 20651, + "Ġplugins": 20652, + "ĠGregory": 20653, + "Ġfucked": 20654, + "elines": 20655, + "ĠAmbassador": 20656, + "take": 20657, + "Ġcleans": 20658, + "ongyang": 20659, + "Anonymous": 20660, + "stro": 20661, + "\"}": 20662, + "aline": 20663, + "ĠOdd": 20664, + "ĠEug": 20665, + "216": 20666, + "Ġboil": 20667, + "ĠPowers": 20668, + "Ġnurses": 20669, + "Obviously": 20670, + "ĠTechnical": 20671, + "Ġexceeded": 20672, + "ORS": 20673, + "Ġextremists": 20674, + "Ġtraces": 20675, + "expl": 20676, + "Ġcomr": 20677, + "ĠSach": 20678, + ")/": 20679, + "Ġmasks": 20680, + "Ġsci": 20681, + "Bon": 20682, + "Ġregression": 20683, + "wegian": 20684, + "Ġadvisor": 20685, + "itures": 20686, + "ĠVo": 20687, + "example": 20688, + "ĠInstruct": 20689, + "Ġsiege": 20690, + "Ġreductions": 20691, + "ptr": 20692, + "Ġstatutory": 20693, + "Ġremoves": 20694, + "Ġpuck": 20695, + "redits": 20696, + "Ġbee": 20697, + "Ġsalad": 20698, + "Ġpromotions": 20699, + "ĠJoshua": 20700, + "withstanding": 20701, + "ETH": 20702, + "ĠCha": 20703, + "imus": 20704, + "Ġexpenditure": 20705, + "aunting": 20706, + "Ġdelighted": 20707, + "Ġ155": 20708, + "beh": 20709, + "Ġcarpet": 20710, + "ĠSpart": 20711, + "Ġjungle": 20712, + "lists": 20713, + "Ġbullying": 20714, + "ĠNobel": 20715, + "ĠGlen": 20716, + "Ġreferenced": 20717, + "Ġintroduces": 20718, + "sein": 20719, + "Ġchopped": 20720, + "glass": 20721, + "ĠWrest": 20722, + "Ġneutrality": 20723, + "ĠâĻ": 20724, + "Ġinvestigator": 20725, + "Ġshelves": 20726, + "Ġunconstitutional": 20727, + "Ġreproduction": 20728, + "Ġmerchant": 20729, + "mia": 20730, + "Ġmetrics": 20731, + "Ġexplosives": 20732, + "ĠSonia": 20733, + "Ġbodily": 20734, + "Ġthickness": 20735, + "Ġpredominantly": 20736, + "ĠAbility": 20737, + "Ġmonitored": 20738, + "ICH": 20739, + "Ġ].": 20740, + "ĠMartinez": 20741, + "Ġvisibility": 20742, + "Ġqueries": 20743, + "Ġgenocide": 20744, + "ĠWarfare": 20745, + "Query": 20746, + "Ġstudios": 20747, + "Ġembry": 20748, + "Ġcorridor": 20749, + "Ġcleaned": 20750, + "complete": 20751, + "ĠMH": 20752, + "Ġenrollment": 20753, + "INGS": 20754, + "Ġimpacted": 20755, + "Ġdisastrous": 20756, + "ĠYun": 20757, + "ĠClaire": 20758, + "ĠBasically": 20759, + "yt": 20760, + "usterity": 20761, + "Ġindirectly": 20762, + "wik": 20763, + "Ġdod": 20764, + "ĠCarr": 20765, 
+ "Ġamp": 20766, + "Ġprohibit": 20767, + "ĠInitial": 20768, + "ĠRd": 20769, + "iji": 20770, + "Ġeducate": 20771, + "corn": 20772, + "iott": 20773, + "ĠBeauty": 20774, + "Ġdetective": 20775, + "ĠConn": 20776, + "since": 20777, + "Ġstagger": 20778, + "Ġobese": 20779, + "Ġbree": 20780, + "ologic": 20781, + "isse": 20782, + "walker": 20783, + "Ġblades": 20784, + "Ġlawful": 20785, + "func": 20786, + "ĠBehind": 20787, + "Ġappetite": 20788, + "Ġ(*": 20789, + "Ġtennis": 20790, + "Ġoffspring": 20791, + "Ġjets": 20792, + "Ġstructured": 20793, + "Ġaforementioned": 20794, + "Nov": 20795, + "Ġscaling": 20796, + "fill": 20797, + "Ġstew": 20798, + "Ġcurb": 20799, + "ĠStephan": 20800, + "edIn": 20801, + "SF": 20802, + "obic": 20803, + "éŃĶ": 20804, + "oug": 20805, + "ĠMM": 20806, + "Ġgenetically": 20807, + "opez": 20808, + "136": 20809, + "Ġumb": 20810, + "ancers": 20811, + "Ġcohort": 20812, + "Ġmerchandise": 20813, + "Ġimposing": 20814, + "ĠLegislature": 20815, + "ĠArchive": 20816, + "ivia": 20817, + "ĠNaval": 20818, + "Ġoffences": 20819, + "Ġmiracle": 20820, + "Ġsnapped": 20821, + "Ġfoes": 20822, + "Ġextensively": 20823, + "ĠRaf": 20824, + "Ġcater": 20825, + "edience": 20826, + "Kit": 20827, + "ĠBin": 20828, + "Ġrecommends": 20829, + "ĠCities": 20830, + "Ġrigid": 20831, + "ĠREAD": 20832, + "ĠNoble": 20833, + "ĠTian": 20834, + "Ġcertificates": 20835, + "antis": 20836, + "oiler": 20837, + "ĠBuddhist": 20838, + "did": 20839, + "Ġsurveyed": 20840, + "Ġdownward": 20841, + "Ġprints": 20842, + "ĠMotion": 20843, + "ronics": 20844, + "ĠSans": 20845, + "ossibly": 20846, + "uctions": 20847, + "Ġcolonies": 20848, + "ĠDanish": 20849, + "unit": 20850, + "Ġspoil": 20851, + "Ġadvisory": 20852, + "berries": 20853, + "Plan": 20854, + "Ġspecification": 20855, + "ophers": 20856, + "ĠResource": 20857, + "Ġshirts": 20858, + "prisingly": 20859, + "communications": 20860, + "Ġtrivial": 20861, + "Ġmentioning": 20862, + "isexual": 20863, + "Ġsupplements": 20864, + "Ġsupervision": 20865, + "BP": 20866, + "vor": 20867, + "Ġwit": 20868, + "Ġcooldown": 20869, + "Ġplaintiff": 20870, + "ĠReviews": 20871, + "ĠSri": 20872, + "ĠMint": 20873, + "ĠSugar": 20874, + "Ġafterward": 20875, + "ĠPriest": 20876, + "ĠInvestment": 20877, + "ogene": 20878, + "ĠTaking": 20879, + "Ġstretching": 20880, + "Ġinflammation": 20881, + "ĠTehran": 20882, + "Ġlining": 20883, + "Ġfreezing": 20884, + "ĠEntity": 20885, + "Ġinspiring": 20886, + "special": 20887, + "price": 20888, + "Ġsue": 20889, + "ĠPorter": 20890, + "ounge": 20891, + "ETA": 20892, + "ĠDerek": 20893, + "ĠLuis": 20894, + "uo": 20895, + "ymph": 20896, + "Ġexterior": 20897, + "ihil": 20898, + "ĠAshley": 20899, + "inator": 20900, + "Ġnutrients": 20901, + "ĠThrones": 20902, + "Ġfinances": 20903, + "ĠInspect": 20904, + "Ġspecially": 20905, + "ĠRequired": 20906, + "ĠPTS": 20907, + "ĠViolence": 20908, + "ointed": 20909, + "shots": 20910, + "Ġexcerpt": 20911, + "coon": 20912, + "INS": 20913, + "ĠGri": 20914, + "Ġrecognised": 20915, + "Week": 20916, + "Young": 20917, + "Ġvom": 20918, + "isle": 20919, + "ĠCurry": 20920, + "ĠBuddh": 20921, + "Ġnotebook": 20922, + "Ġdurable": 20923, + "/?": 20924, + "ĠGad": 20925, + "ĠPupp": 20926, + "Ġforgive": 20927, + "park": 20928, + "Ġpersonalities": 20929, + "analysis": 20930, + "clamation": 20931, + "Ġelevator": 20932, + "Ġwarehouse": 20933, + "ĠRole": 20934, + "unn": 20935, + "Ġillustration": 20936, + "ĠScan": 20937, + "Ġatmospheric": 20938, + "Import": 20939, + "ANC": 20940, + "ricted": 20941, + "fu": 20942, + "010": 20943, + "Ġarche": 20944, + "Ġrewarded": 20945, + 
"akespeare": 20946, + "Ġinternally": 20947, + "ĠRBI": 20948, + "alker": 20949, + "Ġelephant": 20950, + "owitz": 20951, + "ĠPizza": 20952, + "Ġbipartisan": 20953, + "és": 20954, + "Ġslowed": 20955, + "ĠStark": 20956, + "Ġoverride": 20957, + "OUS": 20958, + "Ġ320": 20959, + "undreds": 20960, + "ĠDeck": 20961, + "ĠCensus": 20962, + "bee": 20963, + "146": 20964, + "otor": 20965, + "Ġip": 20966, + "Ġub": 20967, + "ocations": 20968, + "ĠButton": 20969, + "rice": 20970, + "Ġcripp": 20971, + "fff": 20972, + "Ġoriginated": 20973, + "Ġoverwhelmed": 20974, + "appa": 20975, + "Ġforemost": 20976, + "âĢij": 20977, + "ĠLEG": 20978, + "release": 20979, + "eatured": 20980, + "atches": 20981, + "Ġreps": 20982, + "Ġlending": 20983, + "ĠReference": 20984, + "ĠClient": 20985, + "165": 20986, + "venth": 20987, + "Complete": 20988, + "ĠPatrol": 20989, + "Ġsworn": 20990, + "cam": 20991, + "Ġshuttle": 20992, + "ĠRalph": 20993, + "Ġhometown": 20994, + "-,": 20995, + "onal": 20996, + "ĠBP": 20997, + "åı": 20998, + "Ġpersuade": 20999, + "ĠAlexand": 21000, + "Ġcombines": 21001, + "Ġvivid": 21002, + "ĠLag": 21003, + "Ġencoding": 21004, + "Ġsalvation": 21005, + "wen": 21006, + "ĠRecovery": 21007, + "iya": 21008, + "University": 21009, + "ĠBiden": 21010, + "Ġbudgets": 21011, + "ĠTexans": 21012, + "fits": 21013, + "Ġhonored": 21014, + "Ġpython": 21015, + "TD": 21016, + "###": 21017, + "clone": 21018, + "Ġblink": 21019, + "ĠLiquid": 21020, + "Ġunemployed": 21021, + "Ġclashes": 21022, + "ĠCounsel": 21023, + "Ġdirecting": 21024, + "Ġpunct": 21025, + "ĠFalcons": 21026, + "Ġshark": 21027, + "ĠDamascus": 21028, + "Ġjeans": 21029, + "Ġembark": 21030, + "Ġseize": 21031, + "Ġupwards": 21032, + "280": 21033, + "ĠEz": 21034, + "ĠAnything": 21035, + "Ġexotic": 21036, + "lower": 21037, + "ĠCreator": 21038, + "ĠUm": 21039, + "Ġsuburbs": 21040, + "berger": 21041, + "ĠWend": 21042, + "Ġmint": 21043, + "ĠXX": 21044, + "ĠDro": 21045, + "Ġsuffers": 21046, + "Ġherb": 21047, + "tree": 21048, + "Ġfragile": 21049, + "Ġflooded": 21050, + "ĠAlcohol": 21051, + "olean": 21052, + "nyder": 21053, + "ĠKO": 21054, + "Fram": 21055, + "Ġ136": 21056, + "Ġowed": 21057, + "ĠMelee": 21058, + "ĠHash": 21059, + "Ġwhisk": 21060, + "Ġsudo": 21061, + "rr": 21062, + "Quick": 21063, + "appro": 21064, + "Ġii": 21065, + "ĠExamples": 21066, + "hee": 21067, + "Ġpromotes": 21068, + "perature": 21069, + "kar": 21070, + "ĠHonor": 21071, + "Ġsodium": 21072, + "ĠLif": 21073, + "rosso": 21074, + "intendent": 21075, + "Ġcorrespondent": 21076, + "Found": 21077, + "secret": 21078, + "Ġidentifies": 21079, + "agne": 21080, + "Ġlou": 21081, + "ĠPP": 21082, + "Ġcoincidence": 21083, + "move": 21084, + "Ġmilitia": 21085, + "Ġinfiltr": 21086, + "ĠPrimary": 21087, + "Ġpitching": 21088, + "ĠIb": 21089, + "ĠGOOD": 21090, + "ãĤ¸": 21091, + "ĠWizards": 21092, + "iral": 21093, + "ĠVenus": 21094, + "RR": 21095, + "ĠâĢķ": 21096, + "ĠCasey": 21097, + "Ġsadly": 21098, + "Ġadmire": 21099, + "Ġembarrassed": 21100, + "cb": 21101, + "Mel": 21102, + "Ġtubes": 21103, + "Ġbeautifully": 21104, + "ĠQueensland": 21105, + "Below": 21106, + "rez": 21107, + "quet": 21108, + "pleasant": 21109, + "Ġ«": 21110, + "Camp": 21111, + "Ġdecisive": 21112, + "1998": 21113, + "ĠLamb": 21114, + "utton": 21115, + "hn": 21116, + "ĠJagu": 21117, + "aunder": 21118, + "ĠCord": 21119, + "Ġclerk": 21120, + "Ġcaffe": 21121, + "Ġwiped": 21122, + "Ġreim": 21123, + "ĠMountains": 21124, + "Ġimprisoned": 21125, + "Ġdevelops": 21126, + "ĠPra": 21127, + "Ġmodeling": 21128, + "Anyone": 21129, + "ancel": 21130, + "ĠSit": 21131, + 
"Ġshields": 21132, + "Ġlawn": 21133, + "Ġcardiovascular": 21134, + "Ġdemonstrating": 21135, + "Ġparse": 21136, + "ĠIsraelis": 21137, + "Ġeuros": 21138, + "143": 21139, + "Ġglorious": 21140, + "inski": 21141, + "ecd": 21142, + "Ġconditioning": 21143, + "Ġhelpless": 21144, + "Ġmicrosc": 21145, + "ĠHarbor": 21146, + "Ġstakes": 21147, + "Ġ260": 21148, + "Ġunequ": 21149, + "ĠFloyd": 21150, + "Ġdamp": 21151, + "Ġapparatus": 21152, + "ĠLaws": 21153, + "Ġcounters": 21154, + "Ġinduce": 21155, + "atable": 21156, + "ĠAhmed": 21157, + "Ġslam": 21158, + "November": 21159, + "Ġpersist": 21160, + "Ġimminent": 21161, + "án": 21162, + "Ġshred": 21163, + "Ġphases": 21164, + "ĠEdmonton": 21165, + "ĠArmstrong": 21166, + "ĠMeet": 21167, + "ĠKitty": 21168, + "ÑĢ": 21169, + "circ": 21170, + "ĠAdult": 21171, + "Ġarose": 21172, + "ĠXen": 21173, + "Dan": 21174, + "gow": 21175, + "Ġsuperf": 21176, + "ĠAdmir": 21177, + "Ġendure": 21178, + "Ġkeyword": 21179, + "yrus": 21180, + "Ġyarn": 21181, + "Ġpathway": 21182, + "ĠHopkins": 21183, + "midt": 21184, + "Ġcensorship": 21185, + "dependent": 21186, + "Ġinstructor": 21187, + "Sources": 21188, + "Ġtoe": 21189, + "Ġballoon": 21190, + "Nob": 21191, + "Ġswear": 21192, + "ĠCastro": 21193, + "Ġgloss": 21194, + "ĠKavanaugh": 21195, + "Ġremarkably": 21196, + "Photos": 21197, + "ĠNom": 21198, + "ĠSoutheast": 21199, + "yers": 21200, + "Ġvalidation": 21201, + "Ġcannon": 21202, + "ĠVictory": 21203, + "ĠPierre": 21204, + "Ġcautious": 21205, + "Audio": 21206, + "Ġfetch": 21207, + "ĠGift": 21208, + "ĠHyp": 21209, + "Ġremedy": 21210, + "ZE": 21211, + "Ġscent": 21212, + "Ġbeard": 21213, + "ĠRut": 21214, + "-\"": 21215, + "Ġpatents": 21216, + "Hy": 21217, + "Ġunjust": 21218, + "Ġpotato": 21219, + "Ġforthcoming": 21220, + "Ġchef": 21221, + "ĠRift": 21222, + "affe": 21223, + "ĠROM": 21224, + "ĠLaunch": 21225, + "Ġpads": 21226, + "ĠNeo": 21227, + "Ġonset": 21228, + "Ġsqueeze": 21229, + "safe": 21230, + "Ġprefix": 21231, + "ĠTM": 21232, + "ĠNearly": 21233, + "ĠClinical": 21234, + "ĠMental": 21235, + "otiation": 21236, + "ĠUnic": 21237, + "antry": 21238, + "ĠCir": 21239, + "Ġepit": 21240, + "æ": 21241, + "Ġextracted": 21242, + "versely": 21243, + "riad": 21244, + "Ġstrains": 21245, + "Ġtops": 21246, + "Ġpoem": 21247, + "ĠRandy": 21248, + "ĠMaple": 21249, + "THER": 21250, + "upiter": 21251, + "ĠSSD": 21252, + "ļé": 21253, + "Ġuncon": 21254, + "pering": 21255, + "Ġslept": 21256, + "iners": 21257, + "Ġunderwater": 21258, + "ĠEvidence": 21259, + "gone": 21260, + "205": 21261, + "Ġhistorians": 21262, + "Ġsynthesis": 21263, + "Ġfrog": 21264, + "basketball": 21265, + "Ġvibrant": 21266, + "Ġsubord": 21267, + "Ġ365": 21268, + "ĠDial": 21269, + "Ġcooperate": 21270, + "HAHA": 21271, + "Ġgreeted": 21272, + "158": 21273, + "Ġjazz": 21274, + "Ġintox": 21275, + "ĠWalking": 21276, + "Ġsupervisor": 21277, + "ĠFusion": 21278, + "ĠMercedes": 21279, + "send": 21280, + "Ham": 21281, + "sd": 21282, + "nl": 21283, + "Ġtours": 21284, + "ĠFIFA": 21285, + "Ġculp": 21286, + "gd": 21287, + "304": 21288, + "Ġpleas": 21289, + "Ġillustrates": 21290, + "ĠColombia": 21291, + "Ġhighlighting": 21292, + "ĠSummary": 21293, + "Ġexposing": 21294, + "ĠDru": 21295, + "Ġirony": 21296, + "ritional": 21297, + "ĠCarroll": 21298, + "ĠEllis": 21299, + "Pict": 21300, + "ĠRapt": 21301, + "Ġadapter": 21302, + "Ġunm": 21303, + "Ġcorpse": 21304, + "Ġcelebrities": 21305, + "Den": 21306, + "atum": 21307, + "ĠApocalypse": 21308, + "ĠWag": 21309, + "lining": 21310, + "Ġhormones": 21311, + "Rub": 21312, + "ĠXi": 21313, + "ĠVaults": 21314, + "208": 
21315, + "alkyrie": 21316, + "inosaur": 21317, + "Ġfeeds": 21318, + "vity": 21319, + "Ġdefeating": 21320, + "Wait": 21321, + "Ġemphasize": 21322, + "ĠSteelers": 21323, + "yrinth": 21324, + "leys": 21325, + "ĠWhenever": 21326, + "Currently": 21327, + "ĠClock": 21328, + "Ġcollectively": 21329, + "anyon": 21330, + "ĠJP": 21331, + "Ġmentality": 21332, + "Ġdownloads": 21333, + "Ġsurroundings": 21334, + "ĠBarnes": 21335, + "Ġflagship": 21336, + "Ġindicators": 21337, + "Ġgrapp": 21338, + "January": 21339, + "ĠElemental": 21340, + "ĠAthena": 21341, + "ibal": 21342, + "Ġsights": 21343, + "Ġcapita": 21344, + "ĠTreaty": 21345, + "Ġvoiced": 21346, + "ĠGaz": 21347, + "lette": 21348, + "Ġya": 21349, + "Ġexpired": 21350, + "Legend": 21351, + "Hot": 21352, + "nature": 21353, + "Ġunstable": 21354, + "Ġ280": 21355, + "ú": 21356, + "Comment": 21357, + "ALE": 21358, + "Ġquests": 21359, + "Ġhandler": 21360, + "nis": 21361, + "Ġversatile": 21362, + "Ġconceal": 21363, + "engeance": 21364, + "ĠInteractive": 21365, + "Ġobsessed": 21366, + "ĠDogs": 21367, + "Ġcracked": 21368, + "Sound": 21369, + "sv": 21370, + "ĠDylan": 21371, + "roads": 21372, + "fx": 21373, + "ĠCatholics": 21374, + "ĠHag": 21375, + "Ġslammed": 21376, + "Ġglowing": 21377, + "sale": 21378, + "Ġtissues": 21379, + "ĠChi": 21380, + "nee": 21381, + "Ġcher": 21382, + "sic": 21383, + "urrection": 21384, + "Ġbacon": 21385, + "ulatory": 21386, + ").\"": 21387, + "Ġirregular": 21388, + "FORM": 21389, + "assed": 21390, + "Ġintentional": 21391, + "Ġcompensate": 21392, + "ĠSpeaking": 21393, + "ĠSets": 21394, + "153": 21395, + "Ġconventions": 21396, + "bands": 21397, + "emade": 21398, + "Ġecc": 21399, + "ĠWinston": 21400, + "ĠAssassin": 21401, + "ĠBelgian": 21402, + "Ġdependence": 21403, + "Ġniche": 21404, + "Ġbark": 21405, + "ĠJazz": 21406, + "Ġdisadvantage": 21407, + "Ġgasoline": 21408, + "Ġ165": 21409, + "çļĦ": 21410, + "essa": 21411, + "module": 21412, + "angular": 21413, + "OY": 21414, + "ĠTreatment": 21415, + "itas": 21416, + "olation": 21417, + "ĠArnold": 21418, + "Ġfeud": 21419, + "ĠNest": 21420, + "Ġtheatre": 21421, + "ewater": 21422, + "Ġminors": 21423, + "olicy": 21424, + "ĠHaven": 21425, + "division": 21426, + "Ġtrunk": 21427, + "Far": 21428, + "ĠPull": 21429, + "Ġcapturing": 21430, + "Ġ1800": 21431, + "ĠTeen": 21432, + "Ġexempl": 21433, + "Ġclinics": 21434, + "ĠBurg": 21435, + "Ġsubstit": 21436, + "Ġpayload": 21437, + "ĠLav": 21438, + "ĠTroy": 21439, + "ĠWitness": 21440, + "Ġfragments": 21441, + "Ġpasswords": 21442, + "Ġgospel": 21443, + "ĠGin": 21444, + "Ġtenants": 21445, + "olith": 21446, + "Six": 21447, + "Previous": 21448, + "ĠAges": 21449, + "ĠDarwin": 21450, + "Ġblat": 21451, + "Ġempathy": 21452, + "smith": 21453, + "bag": 21454, + "ĠEcho": 21455, + "ĠCamb": 21456, + "ĠMadd": 21457, + "ĠBoo": 21458, + "Ġrede": 21459, + "ĠBurning": 21460, + "Ġsmoothly": 21461, + "ĠAdrian": 21462, + "ĠVampire": 21463, + "ĠMonsters": 21464, + "steam": 21465, + "Style": 21466, + "Ma": 21467, + "rea": 21468, + "ĠDwar": 21469, + "alyst": 21470, + "ursor": 21471, + "Ġelimination": 21472, + "Ġcrypto": 21473, + "cht": 21474, + "ĠEternal": 21475, + "âĢ¦]": 21476, + "ĠSorce": 21477, + "Ill": 21478, + "NER": 21479, + "Ġuh": 21480, + "Conclusion": 21481, + "wage": 21482, + "Ġrespir": 21483, + "Ġreminis": 21484, + "hetical": 21485, + "Ġgy": 21486, + "Ġutilized": 21487, + "icidal": 21488, + "Ġ1900": 21489, + "Ġhunters": 21490, + "ĠSwan": 21491, + "ĠReact": 21492, + "Ġvisitor": 21493, + "ĠThanksgiving": 21494, + "308": 21495, + "Posts": 21496, + "Ġhips": 21497, + "1997": 
21498, + "omers": 21499, + "Ġknocking": 21500, + "ĠVehicle": 21501, + "Ġtil": 21502, + "Ġ138": 21503, + "Ġmi": 21504, + "ĠInvestigation": 21505, + "ĠKenya": 21506, + "Ġcasino": 21507, + "Ġmotives": 21508, + "Ġregain": 21509, + "rex": 21510, + "Ġweekends": 21511, + "Ġstabbed": 21512, + "boro": 21513, + "Ġexploited": 21514, + "ĠHAVE": 21515, + "ĠTelevision": 21516, + "cock": 21517, + "Ġpreparations": 21518, + "Ġendeav": 21519, + "ĠRemote": 21520, + "ĠMaker": 21521, + "ĠProdu": 21522, + "ĠEvan": 21523, + "Ġinformational": 21524, + "ĠLouisville": 21525, + "154": 21526, + "ĠDreams": 21527, + "Ġplots": 21528, + "ĠRunner": 21529, + "Ġhurting": 21530, + "Ġacademy": 21531, + "ĠMontgomery": 21532, + "nm": 21533, + "ĠLanc": 21534, + "ĠAlz": 21535, + "210": 21536, + "elong": 21537, + "Ġretailer": 21538, + "Ġarising": 21539, + "Ġrebellion": 21540, + "Ġblonde": 21541, + "played": 21542, + "Ġinstrumental": 21543, + "Cross": 21544, + "Ġretention": 21545, + "Ġtherapeutic": 21546, + "Ġseas": 21547, + "Ġinfantry": 21548, + "ĠClint": 21549, + "Ġprompting": 21550, + "Ġbitch": 21551, + "Ġstems": 21552, + "ĠKra": 21553, + "Ġthesis": 21554, + "ĠBog": 21555, + "rued": 21556, + "Ġkings": 21557, + "Ġclay": 21558, + "ificent": 21559, + "ĠYES": 21560, + "ĠThing": 21561, + "ĠCubs": 21562, + "veyard": 21563, + "elsh": 21564, + "inarily": 21565, + "ĠEy": 21566, + "ĠRolling": 21567, + "Ġevolving": 21568, + "India": 21569, + "Ġrecognizes": 21570, + "Ġgraduation": 21571, + "isers": 21572, + "Ġfertility": 21573, + "ĠMilan": 21574, + "Command": 21575, + "Ġboxing": 21576, + "Ġ1943": 21577, + "Ġgluten": 21578, + "ĠEmir": 21579, + "Ġidol": 21580, + "Ġconceived": 21581, + "ĠCreation": 21582, + "Merit": 21583, + "uddy": 21584, + "ussions": 21585, + "ĠLieutenant": 21586, + "ietal": 21587, + "Ġunchanged": 21588, + "ĠScale": 21589, + "ĠCrimea": 21590, + "balls": 21591, + "atorial": 21592, + "Ġdepths": 21593, + "Ġempirical": 21594, + "Ġtransm": 21595, + "Ġunsafe": 21596, + "missible": 21597, + "comfort": 21598, + "156": 21599, + "Ġmechanic": 21600, + "002": 21601, + "lins": 21602, + "Ġsmoked": 21603, + "Pos": 21604, + "Ġslowing": 21605, + "Ġlav": 21606, + "Texas": 21607, + "Ġcheating": 21608, + "ĠMetropolitan": 21609, + "ethyl": 21610, + "Ġdiscovering": 21611, + "asse": 21612, + "Ġpencil": 21613, + "ĠPyongyang": 21614, + "Ġcloset": 21615, + "ĠSheet": 21616, + "ĠEntry": 21617, + "oustic": 21618, + "Ġmyst": 21619, + "erate": 21620, + "ariat": 21621, + "Ġminerals": 21622, + "Ġmusician": 21623, + "ĠPul": 21624, + "ĠMaz": 21625, + "249": 21626, + "Ġpermissions": 21627, + "Ġiv": 21628, + "enary": 21629, + "ickers": 21630, + "ĠBing": 21631, + "hea": 21632, + "enable": 21633, + "Ġgriev": 21634, + "Ġasserted": 21635, + "ĠColonel": 21636, + "Ġaffidav": 21637, + "wo": 21638, + "Ġseated": 21639, + "ĠRide": 21640, + "Ġpaintings": 21641, + "ĠPix": 21642, + "Ġ137": 21643, + "ishi": 21644, + "umbai": 21645, + "gotten": 21646, + "ĠEarl": 21647, + "Ġinning": 21648, + "Ġcensus": 21649, + "Ġtravelled": 21650, + "ĠConsult": 21651, + "185": 21652, + "bind": 21653, + "Ġsimplicity": 21654, + "Ġoverlooked": 21655, + "ĠHelpful": 21656, + "Ġmonkey": 21657, + "Ġoverwhelmingly": 21658, + "Blood": 21659, + "ĠFlint": 21660, + "ĠJama": 21661, + "ĠPresent": 21662, + "ĠRage": 21663, + "ĠTA": 21664, + "ptive": 21665, + "Ġturnout": 21666, + "wald": 21667, + "ĠDolphins": 21668, + "ĠVPN": 21669, + "Ġonion": 21670, + "Ġcrafting": 21671, + "mma": 21672, + "ĠMercury": 21673, + "Ġarrange": 21674, + "Ġalerts": 21675, + "ĠOT": 21676, + "zbollah": 21677, + "Ġgases": 21678, + 
"ĠRichardson": 21679, + "sal": 21680, + "lar": 21681, + "Ġfrost": 21682, + "Ġlowering": 21683, + "Ġacclaim": 21684, + "Ġstartups": 21685, + "ĠGain": 21686, + "essment": 21687, + "Ġguardian": 21688, + "人": 21689, + "ĠPie": 21690, + "ĠLinks": 21691, + "Ġmerits": 21692, + "Ġawake": 21693, + "Ġparental": 21694, + "Ġexceeds": 21695, + "Ġidle": 21696, + "ĠPilot": 21697, + "ĠeBay": 21698, + "ĠAccept": 21699, + "ipeg": 21700, + "Cam": 21701, + "ĠKot": 21702, + "Ġtraders": 21703, + "olitics": 21704, + "unker": 21705, + "ĠPale": 21706, + "osi": 21707, + "anmar": 21708, + "Ġ1947": 21709, + "ĠFell": 21710, + "estial": 21711, + "itating": 21712, + "GF": 21713, + "ĠSr": 21714, + "ifted": 21715, + "Ġconnector": 21716, + "ĠBone": 21717, + "illes": 21718, + "260": 21719, + "hma": 21720, + "Ġoverlap": 21721, + "ĠGitHub": 21722, + "Ġcleaner": 21723, + "ĠBaptist": 21724, + "ĠWAS": 21725, + "Ġlungs": 21726, + "Ñģ": 21727, + "ĠBUT": 21728, + "Ġcite": 21729, + "Ġpitched": 21730, + "reatment": 21731, + "Ġtrophies": 21732, + "ĠNu": 21733, + "386": 21734, + "ĠPride": 21735, + "Ġattendees": 21736, + "[]": 21737, + "179": 21738, + "Ġspatial": 21739, + "Ġprizes": 21740, + "ĠReligion": 21741, + "Ġshowcase": 21742, + "ĠCategory": 21743, + "vidia": 21744, + "Target": 21745, + "Property": 21746, + "?,": 21747, + "Ġfusion": 21748, + "pie": 21749, + "ĠUCLA": 21750, + "Ġsoundtrack": 21751, + "Ġprincess": 21752, + "ĠCaval": 21753, + "should": 21754, + "Ġlimbs": 21755, + "Background": 21756, + "Ġlonely": 21757, + "Ġcores": 21758, + "ĠTail": 21759, + "sheet": 21760, + "Ġ132": 21761, + "Ra": 21762, + "ãĤ«": 21763, + "ĠBolt": 21764, + "Ġbooked": 21765, + "Ġadminister": 21766, + "Ġequals": 21767, + "wy": 21768, + "Ġobserving": 21769, + "ĠBaron": 21770, + "ĠAdobe": 21771, + "Ġvirgin": 21772, + "ĠSocialist": 21773, + "Move": 21774, + "ghazi": 21775, + "ĠLinda": 21776, + "212": 21777, + "Ġbrewing": 21778, + "Ġmerchants": 21779, + "burse": 21780, + "Ġdivor": 21781, + "Ġmetals": 21782, + "ĠNer": 21783, + "Ġsums": 21784, + "ĠEnemy": 21785, + "Ġenvision": 21786, + "Ġgranting": 21787, + "ĠHoney": 21788, + "ĠSkyrim": 21789, + "Ġsocio": 21790, + "graded": 21791, + "Ġselective": 21792, + "WASHINGTON": 21793, + "Ġ1948": 21794, + "ĠSirius": 21795, + "ĠGross": 21796, + "activity": 21797, + "ĠIvan": 21798, + "Ġfurious": 21799, + "BSD": 21800, + "ĠPrevious": 21801, + "Ġresponsive": 21802, + "Ġcharitable": 21803, + "Ġleaning": 21804, + "ĠPew": 21805, + "Ġviolates": 21806, + "\\\\\\\\\\\\\\\\": 21807, + "ĠComing": 21808, + "wire": 21809, + "Ġpoet": 21810, + "Ġresolutions": 21811, + "command": 21812, + "ĠPortuguese": 21813, + "Ġnickname": 21814, + "Ġdeaf": 21815, + "February": 21816, + "Ġrecognise": 21817, + "Ġentirety": 21818, + "Ġseasonal": 21819, + "placed": 21820, + "ĠTelegraph": 21821, + "Ġmicrophone": 21822, + "ouring": 21823, + "Ġgrains": 21824, + "Ġgoverned": 21825, + "Ġpostp": 21826, + "ĠWaters": 21827, + "inement": 21828, + "Ġundocumented": 21829, + "ĠComcast": 21830, + "Ġfox": 21831, + "Ġassaults": 21832, + "reon": 21833, + "many": 21834, + "ĠJenkins": 21835, + "ĠAnyway": 21836, + "Ġassessments": 21837, + "Ġdowns": 21838, + "ĠMouse": 21839, + "Ġsuperb": 21840, + "kt": 21841, + "ĠDow": 21842, + "Ġtaxation": 21843, + "401": 21844, + "Ġsmiles": 21845, + "Ġundertaken": 21846, + "Ġexh": 21847, + "Ġenthusiastic": 21848, + "Ġtwent": 21849, + "Ġgovernmental": 21850, + "Ġautonomy": 21851, + "ĠTechnologies": 21852, + "ĠChain": 21853, + "Ġprevalent": 21854, + "fb": 21855, + "Ġnicotine": 21856, + "ogram": 21857, + "job": 21858, + "Ġawaiting": 21859, 
+ "ĠMenu": 21860, + "Ġdeputies": 21861, + "kov": 21862, + "ishops": 21863, + "Button": 21864, + "ĠShanghai": 21865, + "Ġdiesel": 21866, + "ĠDuck": 21867, + "Ryan": 21868, + "ĠPCs": 21869, + "NF": 21870, + "jury": 21871, + "ente": 21872, + "Ġinaccurate": 21873, + "eddy": 21874, + "Whatever": 21875, + "Ġshowc": 21876, + "ĠNad": 21877, + "odus": 21878, + "etr": 21879, + "Ġplaintiffs": 21880, + "ĠWOR": 21881, + "ĠAssange": 21882, + "Ġprivat": 21883, + "Ġpremiums": 21884, + "Ġtam": 21885, + "URL": 21886, + "Ġelites": 21887, + "ĠRanger": 21888, + "ottenham": 21889, + "ĠHoff": 21890, + "ĠAthens": 21891, + "Ġdefinite": 21892, + "Ġsighed": 21893, + "Ġevenly": 21894, + "211": 21895, + "ĠAmber": 21896, + "akia": 21897, + "Ġmailing": 21898, + "Ġcrashing": 21899, + "ĠConfederate": 21900, + "rugged": 21901, + "Wal": 21902, + "ĠDepths": 21903, + "Ġjuvenile": 21904, + "Ġreactor": 21905, + "Introduction": 21906, + "ĠDeluxe": 21907, + "1995": 21908, + "ĠSanchez": 21909, + "ĠMead": 21910, + "ivable": 21911, + ":-": 21912, + "ĠPlanning": 21913, + "ĠTrap": 21914, + "quin": 21915, + "ĠProtect": 21916, + "vered": 21917, + "Information": 21918, + "Ġkidney": 21919, + "innamon": 21920, + "las": 21921, + "Ġpolicing": 21922, + "Ġtolerate": 21923, + "ĠQi": 21924, + "Ġbiased": 21925, + "Fort": 21926, + "ĠKi": 21927, + "save": 21928, + "Ġprivileged": 21929, + "Ġbeasts": 21930, + "ĠGlas": 21931, + "ĠCinem": 21932, + "Ġcomeback": 21933, + "Sunday": 21934, + "Ġextinction": 21935, + "hops": 21936, + "Ġtransmit": 21937, + "Ġdoubles": 21938, + "ĠFlat": 21939, + "167": 21940, + "Ġdisputed": 21941, + "Ġinjustice": 21942, + "foo": 21943, + "Vict": 21944, + "roleum": 21945, + "ĠJulie": 21946, + "Context": 21947, + "ĠRarity": 21948, + "issue": 21949, + "Component": 21950, + "Ġcounseling": 21951, + "anne": 21952, + "dark": 21953, + "Ġobjections": 21954, + "uilt": 21955, + "Ġgast": 21956, + "Ġplac": 21957, + "Ġunused": 21958, + "ãĥĩ": 21959, + "ĠTrial": 21960, + "ĠJas": 21961, + "hedral": 21962, + "obb": 21963, + "Ġtemporal": 21964, + "ĠPRO": 21965, + "ĠNW": 21966, + "ĠAnniversary": 21967, + "Large": 21968, + "Ġtherm": 21969, + "Ġdavid": 21970, + "Ġsystemic": 21971, + "ĠShir": 21972, + "mut": 21973, + "ĠNept": 21974, + "address": 21975, + "Ġscanning": 21976, + "Ġunderstandable": 21977, + "Ġcanvas": 21978, + "Cat": 21979, + "ĠZoo": 21980, + "Ġangels": 21981, + "LO": 21982, + "ĠStatement": 21983, + "ĠSig": 21984, + "ovable": 21985, + "ĠAway": 21986, + "sharing": 21987, + "ocrats": 21988, + "stated": 21989, + "Ġweighing": 21990, + "Nor": 21991, + "wild": 21992, + "Bey": 21993, + "Ġastonishing": 21994, + "ĠReynolds": 21995, + "Ġopener": 21996, + "Ġtrainer": 21997, + "Ġsurgical": 21998, + "pn": 21999, + "Ġadjusting": 22000, + "wheel": 22001, + "Ġfrown": 22002, + "ervative": 22003, + "Ġsuspend": 22004, + "Within": 22005, + "tein": 22006, + "Ġobstacle": 22007, + "Ġliberties": 22008, + "ymes": 22009, + "Ġuranium": 22010, + "ansom": 22011, + "anol": 22012, + "uba": 22013, + "ĠLoss": 22014, + "Ġarous": 22015, + "ĠHenderson": 22016, + "Wow": 22017, + "spl": 22018, + "cur": 22019, + "ĠÂŃ": 22020, + "Ġtheirs": 22021, + "Damage": 22022, + "Ġdownloading": 22023, + "Ġdiscern": 22024, + "ĠSto": 22025, + "ĠFla": 22026, + "Ġhath": 22027, + "ĠAj": 22028, + "Ġunpleasant": 22029, + "European": 22030, + "expensive": 22031, + "Ġscreenshot": 22032, + "ĠUV": 22033, + "Ġallied": 22034, + "ĠPersian": 22035, + "Ġmonopoly": 22036, + "Ġatom": 22037, + "ĠRedskins": 22038, + "\"><": 22039, + "Ġcancell": 22040, + "Ġcinema": 22041, + "131": 22042, + "fair": 22043, + 
"ĠAlfred": 22044, + "Ġduck": 22045, + "args": 22046, + "223": 22047, + "ĠISI": 22048, + "Ġsignaling": 22049, + "inar": 22050, + "Ġlaughs": 22051, + "Ġforwards": 22052, + "Ġreckless": 22053, + "Ġlisteners": 22054, + "ativity": 22055, + "Ġvastly": 22056, + "nant": 22057, + "Less": 22058, + "ĠHunting": 22059, + "ĠScientific": 22060, + "ITED": 22061, + "Ġknight": 22062, + "ĠHTC": 22063, + "usa": 22064, + "tmp": 22065, + "Ġrude": 22066, + "ĠLegendary": 22067, + "Ġarises": 22068, + "Bad": 22069, + "ĠClaim": 22070, + "peg": 22071, + "Ġrealities": 22072, + "Think": 22073, + "Ġ°": 22074, + "Ġrode": 22075, + "Ġstrive": 22076, + "Ġanecd": 22077, + "Ġshorts": 22078, + "Ġhypothes": 22079, + "Ġcoordinated": 22080, + "ĠGandhi": 22081, + "ĠFPS": 22082, + "RED": 22083, + "Ġsusceptible": 22084, + "Ġshrink": 22085, + "ĠChart": 22086, + "Help": 22087, + "Ġion": 22088, + "deep": 22089, + "ribes": 22090, + "ĠKai": 22091, + "ĠCustomer": 22092, + "Summary": 22093, + "Ġcough": 22094, + "wife": 22095, + "Ġlend": 22096, + "Ġpositioning": 22097, + "Ġlottery": 22098, + "ĠCanyon": 22099, + "Ġfade": 22100, + "Ġbronze": 22101, + "ĠKenny": 22102, + "Ġboasts": 22103, + "ĠEnhanced": 22104, + "record": 22105, + "Ġemergence": 22106, + "Ġakin": 22107, + "ĠBert": 22108, + "itous": 22109, + "âĸij": 22110, + "Ġstip": 22111, + "Ġexchanged": 22112, + "omore": 22113, + "alsh": 22114, + "Ġreservoir": 22115, + "Ġstandpoint": 22116, + "WM": 22117, + "Ġinitiate": 22118, + "Ġdecay": 22119, + "Ġbrewery": 22120, + "Ġterribly": 22121, + "Ġmortal": 22122, + "levard": 22123, + "Ġrevis": 22124, + "NI": 22125, + "elo": 22126, + "Ġconfess": 22127, + "ĠMSNBC": 22128, + "Ġsubmissions": 22129, + "Controller": 22130, + "Ġ202": 22131, + "ĠRuth": 22132, + "});": 22133, + "ĠAzure": 22134, + "Ġ.\"": 22135, + "206": 22136, + "ĠMarketing": 22137, + "Ġlaund": 22138, + "iencies": 22139, + "Ġrenowned": 22140, + "ĠTrou": 22141, + "ĠNGO": 22142, + "blems": 22143, + "Ġterrified": 22144, + "Ġwarns": 22145, + "Ġpert": 22146, + "Ġunsure": 22147, + "480": 22148, + "alez": 22149, + "ultz": 22150, + "ĠOutside": 22151, + "Ġstyl": 22152, + "ĠUnderground": 22153, + "Ġpanc": 22154, + "Ġdictionary": 22155, + "Ġfoe": 22156, + "riminal": 22157, + "ĠNorwegian": 22158, + "Ġjailed": 22159, + "Ġmaternal": 22160, + "ée": 22161, + "ĠLucy": 22162, + "cop": 22163, + "Cho": 22164, + "Ġunsigned": 22165, + "ĠZelda": 22166, + "ĠInsider": 22167, + "ĠContinued": 22168, + "Ġ133": 22169, + "ĠNaruto": 22170, + "ĠMajority": 22171, + "169": 22172, + "ĠWo": 22173, + "ãĤĵ": 22174, + "Ġpastor": 22175, + "Ġinformal": 22176, + "н": 22177, + "anthrop": 22178, + "join": 22179, + "ãģĹ": 22180, + "itational": 22181, + "NP": 22182, + "ĠWriting": 22183, + "fn": 22184, + "ĠBever": 22185, + "195": 22186, + "Ġyelling": 22187, + "Ġdrastically": 22188, + "Ġeject": 22189, + "Ġneut": 22190, + "Ġthrive": 22191, + "ĠFrequ": 22192, + "oux": 22193, + "Ġpossesses": 22194, + "ĠSenators": 22195, + "ĠDES": 22196, + "ĠShakespeare": 22197, + "ĠFranco": 22198, + "ĠLB": 22199, + "uchi": 22200, + "Ġincarn": 22201, + "Ġfounders": 22202, + "Function": 22203, + "Ġbrightness": 22204, + "ĠBT": 22205, + "Ġwhale": 22206, + "ĠTheater": 22207, + "mass": 22208, + "ĠDoll": 22209, + "Something": 22210, + "Ġechoed": 22211, + "ĠHex": 22212, + "crit": 22213, + "afia": 22214, + "Ġgoddess": 22215, + "Ġeleven": 22216, + "ĠPreview": 22217, + "ĠAurora": 22218, + "Ġ401": 22219, + "ulsive": 22220, + "ĠLogan": 22221, + "inburgh": 22222, + "ĠCenters": 22223, + "ĠONLY": 22224, + "ĠAid": 22225, + "Ġparadox": 22226, + "Ġhurd": 22227, + "ĠLC": 22228, 
+ "Due": 22229, + "court": 22230, + "Ġoffended": 22231, + "Ġevaluating": 22232, + "ĠMatthews": 22233, + "Ġtomb": 22234, + "Ġpayroll": 22235, + "Ġextraction": 22236, + "ĠHands": 22237, + "ifi": 22238, + "Ġsupernatural": 22239, + "ĠCOMM": 22240, + "]=": 22241, + "dogs": 22242, + "Ġ512": 22243, + "ĠMeeting": 22244, + "Richard": 22245, + "ĠMaximum": 22246, + "Ġideals": 22247, + "Things": 22248, + "mand": 22249, + "ĠRegardless": 22250, + "Ġhumili": 22251, + "buffer": 22252, + "Little": 22253, + "ĠDani": 22254, + "ĠNak": 22255, + "Ġliberation": 22256, + "ĠAbe": 22257, + "ĠOL": 22258, + "Ġstuffed": 22259, + "aca": 22260, + "inda": 22261, + "raphic": 22262, + "Ġmosqu": 22263, + "Ġcampaigning": 22264, + "Ġoccupy": 22265, + "Squ": 22266, + "rina": 22267, + "ĠWel": 22268, + "ĠVS": 22269, + "Ġphysic": 22270, + "Ġpuls": 22271, + "rint": 22272, + "oaded": 22273, + "ETF": 22274, + "ĠArchives": 22275, + "Ġvenues": 22276, + "hner": 22277, + "ĠTurbo": 22278, + "Ġlust": 22279, + "Ġappealed": 22280, + "quez": 22281, + "ilib": 22282, + "ĠTimothy": 22283, + "Ġomn": 22284, + "dro": 22285, + "Ġobsession": 22286, + "ĠSavage": 22287, + "1996": 22288, + "Global": 22289, + "Jes": 22290, + "214": 22291, + "Ġsliding": 22292, + "Ġdisappro": 22293, + "ĠMagical": 22294, + "Ġvoluntarily": 22295, + "gb": 22296, + "aney": 22297, + "Ġprophet": 22298, + "ĠRein": 22299, + "ĠJulia": 22300, + "ĠWorth": 22301, + "aurus": 22302, + "Ġbounds": 22303, + "ieu": 22304, + ")))": 22305, + "Ġcrore": 22306, + "ĠCitizen": 22307, + "Sky": 22308, + "Ġcolumnist": 22309, + "Ġseekers": 22310, + "ondo": 22311, + "ISA": 22312, + "ĠLength": 22313, + "Ġnostalg": 22314, + "Ġnewcom": 22315, + "Ġdetrim": 22316, + "entric": 22317, + "375": 22318, + "ĠGE": 22319, + "Ġautop": 22320, + "Ġacademics": 22321, + "AppData": 22322, + "ĠShen": 22323, + "Ġidiot": 22324, + "ĠTransit": 22325, + "Ġteaspoon": 22326, + "Wil": 22327, + "KO": 22328, + "ĠComedy": 22329, + ">,": 22330, + "Ġpopulated": 22331, + "WD": 22332, + "Ġpigs": 22333, + "ĠOculus": 22334, + "Ġsympathetic": 22335, + "Ġmarathon": 22336, + "198": 22337, + "Ġseizure": 22338, + "sided": 22339, + "Ġdop": 22340, + "irtual": 22341, + "Land": 22342, + "ĠFloor": 22343, + "osaurs": 22344, + "...]": 22345, + "Ġlos": 22346, + "Ġsubsidiary": 22347, + "EY": 22348, + "ĠParts": 22349, + "ĠStef": 22350, + "ĠJudiciary": 22351, + "Ġ134": 22352, + "Ġmirrors": 22353, + "Ġket": 22354, + "times": 22355, + "Ġneurolog": 22356, + "Ġcav": 22357, + "ĠGuest": 22358, + "Ġtumor": 22359, + "scill": 22360, + "ĠLloyd": 22361, + "Est": 22362, + "Ġclearer": 22363, + "Ġstereotypes": 22364, + "Ġdur": 22365, + "nothing": 22366, + "Reddit": 22367, + "Ġnegotiated": 22368, + "------------------------": 22369, + "235": 22370, + "Ġflown": 22371, + "ĠSeoul": 22372, + "ĠResident": 22373, + "ĠSCH": 22374, + "Ġdisappearance": 22375, + "ĠVince": 22376, + "grown": 22377, + "Ġgrabs": 22378, + "ril": 22379, + "ĠInfinite": 22380, + "ĠTwenty": 22381, + "Ġpedestrian": 22382, + "Ġjersey": 22383, + "ĠFur": 22384, + "ĠInfinity": 22385, + "ĠElliott": 22386, + "Ġmentor": 22387, + "Ġmorally": 22388, + "Ġobey": 22389, + "secure": 22390, + "iffe": 22391, + "Ġantibiotics": 22392, + "angled": 22393, + "ĠFreeman": 22394, + "ĠIntroduction": 22395, + "Jun": 22396, + "Ġmarsh": 22397, + "icans": 22398, + "ĠEVENTS": 22399, + "ochond": 22400, + "Wall": 22401, + "iculty": 22402, + "Ġmisdemeanor": 22403, + "Ġly": 22404, + "Thomas": 22405, + "ĠResolution": 22406, + "Ġanimations": 22407, + "ĠDry": 22408, + "Ġintercourse": 22409, + "ĠNewcastle": 22410, + "ĠHog": 22411, + 
"ĠEquipment": 22412, + "177": 22413, + "Ġterritorial": 22414, + "Ġarchives": 22415, + "203": 22416, + "Filter": 22417, + "ĠMunich": 22418, + "Ġcommanded": 22419, + "ĠWand": 22420, + "Ġpitches": 22421, + "ĠCroat": 22422, + "Ġratios": 22423, + "ĠMits": 22424, + "Ġaccumulated": 22425, + "ĠSpecifically": 22426, + "Ġgentleman": 22427, + "acerb": 22428, + "Ġpenn": 22429, + "Ġaka": 22430, + "ĠFuk": 22431, + "Ġintervene": 22432, + "ĠRefuge": 22433, + "ĠAlzheimer": 22434, + "Ġsuccession": 22435, + "ohan": 22436, + "does": 22437, + "Lord": 22438, + "Ġseparat": 22439, + "Ġcorrespondence": 22440, + "Ġshiny": 22441, + "Prior": 22442, + "Ġsulf": 22443, + "Ġmiserable": 22444, + "Ġdedication": 22445, + "().": 22446, + "Ġspecialists": 22447, + "Ġdefects": 22448, + "ĠCult": 22449, + "ĠXia": 22450, + "Ġjeopard": 22451, + "ĠOre": 22452, + "Ability": 22453, + "Ġlear": 22454, + "Ġambitions": 22455, + "ĠBMI": 22456, + "ĠArabs": 22457, + "Ġ1942": 22458, + "Ġpreservation": 22459, + "ificate": 22460, + "Ġashamed": 22461, + "loss": 22462, + "ĠRestaur": 22463, + "Ġresemble": 22464, + "Ġenrich": 22465, + "ĠKN": 22466, + "ĠClan": 22467, + "float": 22468, + "Ġplayable": 22469, + "ITT": 22470, + "Ġharmony": 22471, + "arrison": 22472, + "ĠWeinstein": 22473, + "were": 22474, + "Ġpoisoning": 22475, + "ĠComput": 22476, + "ĠWordPress": 22477, + "major": 22478, + "ĠValve": 22479, + "Fan": 22480, + "ĠThrow": 22481, + "ĠRomans": 22482, + "ĠDepression": 22483, + "ados": 22484, + "Ġtortured": 22485, + "Ġbalancing": 22486, + "bottom": 22487, + "Ġacquiring": 22488, + "ĠMonte": 22489, + "ardi": 22490, + "Ġaura": 22491, + "Ġ##": 22492, + "ĠStanding": 22493, + "ĠAtlas": 22494, + "CF": 22495, + "Ġintrins": 22496, + "ĠBenghazi": 22497, + "Ġcamping": 22498, + "Ġtapped": 22499, + "blade": 22500, + "strous": 22501, + "ĠRabb": 22502, + "ĠWritten": 22503, + "tip": 22504, + "ĠNeigh": 22505, + "sterdam": 22506, + "ĠAllow": 22507, + "ĠHealing": 22508, + "ĠRhod": 22509, + "num": 22510, + "Ġcaffeine": 22511, + "ĠPercent": 22512, + "Ġboo": 22513, + "Ġapples": 22514, + "305": 22515, + "Ġwelcoming": 22516, + "Ġapplaud": 22517, + "Ġausterity": 22518, + "±": 22519, + "ĠReality": 22520, + "efe": 22521, + "å®": 22522, + "Ġsucks": 22523, + "Ġtabs": 22524, + "ĠPayPal": 22525, + "Ġbackpack": 22526, + "Ġgifted": 22527, + "abulary": 22528, + "ĠScout": 22529, + "irteen": 22530, + "Ġchin": 22531, + "Ġomitted": 22532, + "Ġnegatively": 22533, + "Ġaccessing": 22534, + "ĠEarn": 22535, + "Ġambulance": 22536, + "Ġheadphones": 22537, + "Ġ205": 22538, + "ĠRefresh": 22539, + "president": 22540, + "ĠKitchen": 22541, + "ĠEntered": 22542, + "ĠSnyder": 22543, + "005": 22544, + "omical": 22545, + "Ġborrowed": 22546, + "ĠNem": 22547, + "Ġaviation": 22548, + "Ġstall": 22549, + "rimination": 22550, + "Ġuniforms": 22551, + "itime": 22552, + "ĠSimmons": 22553, + "energy": 22554, + "ablished": 22555, + "yy": 22556, + "qualified": 22557, + "Ġrallies": 22558, + "ĠStuart": 22559, + "flight": 22560, + "Ġgangs": 22561, + "rag": 22562, + "Ġvault": 22563, + "lux": 22564, + "ĠCompar": 22565, + "Ġdesignation": 22566, + "209": 22567, + "ĠJos": 22568, + "dollar": 22569, + "zero": 22570, + "Ġwells": 22571, + "303": 22572, + "Ġconstituents": 22573, + "Ġheck": 22574, + "Ġcows": 22575, + "Ġcommanders": 22576, + "Ġdifferential": 22577, + "ĠCatherine": 22578, + "299": 22579, + "Ġvalve": 22580, + "Ġbrace": 22581, + "Ġperspectives": 22582, + "cert": 22583, + "fact": 22584, + "icularly": 22585, + "ĠMcN": 22586, + "planes": 22587, + "Ġintric": 22588, + "Ġpeas": 22589, + "ovan": 22590, + "Ġtossed": 
22591, + "retch": 22592, + "ĠLopez": 22593, + "Ġunfamiliar": 22594, + "death": 22595, + "ĠApart": 22596, + "ĠChang": 22597, + "Ġrelieved": 22598, + "rophe": 22599, + "Ġairports": 22600, + "Ġfreak": 22601, + "util": 22602, + "Mill": 22603, + "ĠChin": 22604, + "ĠOwen": 22605, + "male": 22606, + "ĠBroken": 22607, + "ĠWinds": 22608, + "rob": 22609, + "rising": 22610, + "Ġfirefighters": 22611, + "Ġauthoritarian": 22612, + "Ġ148": 22613, + "Bitcoin": 22614, + "external": 22615, + "Ġbrowsers": 22616, + "ichever": 22617, + "orian": 22618, + "Ġunb": 22619, + "Ġpoke": 22620, + "ĠZot": 22621, + "Mid": 22622, + "ĠPopular": 22623, + "Ġcovert": 22624, + "Ġcontributes": 22625, + "Ġ650": 22626, + "Ġcontention": 22627, + "Gate": 22628, + "Ġconsoles": 22629, + "Ġchromos": 22630, + "ĠIX": 22631, + "Ġvisually": 22632, + "ĠEisen": 22633, + "Ġjewelry": 22634, + "Ġdelegation": 22635, + "Ġaccelerate": 22636, + "ĠRiley": 22637, + "Ġslope": 22638, + "Ġindoor": 22639, + "itially": 22640, + "Ġhugely": 22641, + "Ġtunnels": 22642, + "Ġfined": 22643, + "Ġdirective": 22644, + "Ġforehead": 22645, + "ustomed": 22646, + "Ġskate": 22647, + "Music": 22648, + "gas": 22649, + "Ġrecognizing": 22650, + "ambo": 22651, + "Ġoverweight": 22652, + "ĠGrade": 22653, + "ÙĬ": 22654, + "Ġsounding": 22655, + "Ġlocking": 22656, + "ĠREM": 22657, + "Store": 22658, + "Ġexcav": 22659, + "ĠLikewise": 22660, + "ĠLights": 22661, + "Ġelbow": 22662, + "ĠSupply": 22663, + "wic": 22664, + "Ġhandsome": 22665, + "1994": 22666, + "Coll": 22667, + "Ġadequately": 22668, + "ĠAssociate": 22669, + "Ġstrips": 22670, + "Ġcrackdown": 22671, + "Ġmarvel": 22672, + "ĠKun": 22673, + "Ġpassages": 22674, + "@@@@": 22675, + "ĠTall": 22676, + "Ġthoughtful": 22677, + "namese": 22678, + "Ġprostitution": 22679, + "business": 22680, + "Ġballistic": 22681, + "personal": 22682, + "cig": 22683, + "izational": 22684, + "Round": 22685, + "ĠÂłĠÂłĠÂłĠÂł": 22686, + "ĠColeman": 22687, + "Ġadmitting": 22688, + "ĠPlug": 22689, + "Ġbitcoins": 22690, + "ĠSuz": 22691, + "Ġfairness": 22692, + "Ġsupplier": 22693, + "Ġcatastrophic": 22694, + "ĠHelen": 22695, + "oqu": 22696, + "Marc": 22697, + "ĠArticles": 22698, + "gie": 22699, + "Ġendangered": 22700, + "Ġdestiny": 22701, + "ĠVolt": 22702, + "olia": 22703, + "axis": 22704, + "Ġcheat": 22705, + "Ġunified": 22706, + "ICO": 22707, + "quote": 22708, + "302": 22709, + "ĠSed": 22710, + "Ġsuppression": 22711, + "Ġanalyzing": 22712, + "Ġsquat": 22713, + "Ġfiguring": 22714, + "Ġcoordinates": 22715, + "Ġchunks": 22716, + "Ġ1946": 22717, + "Ġsubp": 22718, + "Ġwiki": 22719, + "ĠForbes": 22720, + "ĠJupiter": 22721, + "ĠErik": 22722, + "imer": 22723, + "ĠCommercial": 22724, + "\\)": 22725, + "Ġlegitimacy": 22726, + "Ġdental": 22727, + "ĠMean": 22728, + "Ġdeficits": 22729, + "550": 22730, + "Originally": 22731, + "ĠHorror": 22732, + "Ġcontamination": 22733, + "llah": 22734, + "Ġconfisc": 22735, + "ĠClare": 22736, + "TB": 22737, + "ĠFailed": 22738, + "aned": 22739, + "Ġruler": 22740, + "ĠController": 22741, + "Ġfeminists": 22742, + "Fix": 22743, + "gay": 22744, + "207": 22745, + "Ġrabbit": 22746, + "Third": 22747, + "owntown": 22748, + "Ġglue": 22749, + "Ġvolatile": 22750, + "Ġshining": 22751, + "Ġfoll": 22752, + "Ġimpaired": 22753, + "Ġsupers": 22754, + "æĪ": 22755, + "Ġclutch": 22756, + "ļéĨĴ": 22757, + "Ġprolet": 22758, + "Ġ(!": 22759, + "Ġyelled": 22760, + "ĠKiev": 22761, + "ĠErn": 22762, + "ĠShock": 22763, + "KB": 22764, + "Ġsituated": 22765, + "query": 22766, + "ĠNas": 22767, + "Ġannex": 22768, + "character": 22769, + "ĠHoliday": 22770, + 
"Ġautomation": 22771, + "ĠJill": 22772, + "ĠRemastered": 22773, + "Ġlinem": 22774, + "Ġwilderness": 22775, + "ĠHorizon": 22776, + "ĠGuinea": 22777, + "AZ": 22778, + "Ġmainland": 22779, + "Ġsecrecy": 22780, + "LEASE": 22781, + "Ġpunk": 22782, + "ĠProvince": 22783, + "(),": 22784, + "Speed": 22785, + "Ġhanding": 22786, + "ĠSebast": 22787, + "Sir": 22788, + "rase": 22789, + "Ġjournals": 22790, + "Ġcongest": 22791, + "ĠTut": 22792, + "irrel": 22793, + "Ġschizophrenia": 22794, + "Ġmisogyn": 22795, + "healthy": 22796, + "Iron": 22797, + "Ġreacted": 22798, + "-$": 22799, + "252": 22800, + "Ġplural": 22801, + "Ġplum": 22802, + "Ġbargain": 22803, + "Ġgrounded": 22804, + "finder": 22805, + "Ġdisse": 22806, + "ĠLaz": 22807, + "OOD": 22808, + "Ġatroc": 22809, + "Factory": 22810, + "Ġminions": 22811, + "Ġori": 22812, + "ĠBrave": 22813, + "ĠPRE": 22814, + "ĠMyanmar": 22815, + "ĠHod": 22816, + "Ġexpedition": 22817, + "Ġexplode": 22818, + "ĠCoord": 22819, + "Ġextr": 22820, + "ĠBrief": 22821, + "ĠADHD": 22822, + "Ġhardcore": 22823, + "feeding": 22824, + "Ġdile": 22825, + "ĠFruit": 22826, + "Ġvaccination": 22827, + "ĠMao": 22828, + "osphere": 22829, + "Ġcontests": 22830, + "-|": 22831, + "Ġfren": 22832, + "isphere": 22833, + "Rom": 22834, + "ĠSharp": 22835, + "ĠTrend": 22836, + "Ġdisconnect": 22837, + "âĢ¢âĢ¢": 22838, + "Ġpersecution": 22839, + "Earth": 22840, + "Ġhealthier": 22841, + "384": 22842, + "Ġcob": 22843, + "ĠTrinity": 22844, + "OWS": 22845, + "ANN": 22846, + "Ġspecialty": 22847, + "Ġgru": 22848, + "Ġcooperative": 22849, + "why": 22850, + "Starting": 22851, + "ĠIssues": 22852, + "stre": 22853, + "ensor": 22854, + "Ġ185": 22855, + "Adv": 22856, + "!?": 22857, + "ĠRevel": 22858, + "emia": 22859, + "ĠHulk": 22860, + "Ġcelebrations": 22861, + "ĠSou": 22862, + "raud": 22863, + "ĠKlein": 22864, + "Ġunreal": 22865, + "context": 22866, + "Ġpartnerships": 22867, + "Ġadopting": 22868, + "tical": 22869, + "Ġsplash": 22870, + "ĠHezbollah": 22871, + "category": 22872, + "cyclop": 22873, + "xton": 22874, + "ĠDot": 22875, + "urdy": 22876, + "tz": 22877, + "Ġenvelope": 22878, + "ĠNL": 22879, + "âķ": 22880, + "Ġwherein": 22881, + "Spec": 22882, + "184": 22883, + "Ġtelev": 22884, + "aliation": 22885, + "Ġmyths": 22886, + "å°": 22887, + "Ġrigorous": 22888, + "Ġcommunicating": 22889, + "Ġobserver": 22890, + "Ġrehe": 22891, + "ĠWash": 22892, + "Ġapologized": 22893, + "ĠTin": 22894, + "Ġexpenditures": 22895, + "workers": 22896, + "document": 22897, + "Ġhesitate": 22898, + "ĠLenin": 22899, + "Ġunpredictable": 22900, + "Ġrenewal": 22901, + "cler": 22902, + "okia": 22903, + "ĠCONT": 22904, + "Ġpostseason": 22905, + "Tokens": 22906, + "Ġexacerb": 22907, + "Ġbetting": 22908, + "Ġ147": 22909, + "Ġelevation": 22910, + "Wood": 22911, + "ĠSolomon": 22912, + "194": 22913, + "004": 22914, + "output": 22915, + "Ġredund": 22916, + "ĠMumbai": 22917, + "ĠpH": 22918, + "Ġreproduce": 22919, + "ĠDuration": 22920, + "MAX": 22921, + "Ġbog": 22922, + "CBS": 22923, + "ĠBalance": 22924, + "ĠSgt": 22925, + "ĠRecent": 22926, + "Ġcd": 22927, + "Ġpopped": 22928, + "Ġincompet": 22929, + "prop": 22930, + "ayan": 22931, + "guy": 22932, + "Pacific": 22933, + "Ġtyr": 22934, + "Ġ{{": 22935, + "ĠMystic": 22936, + "ĠDana": 22937, + "Ġmasturb": 22938, + "Ġgeometry": 22939, + "â": 22940, + "ĠCorrect": 22941, + "Ġtrajectory": 22942, + "Ġdistracted": 22943, + "Ġfoo": 22944, + "ĠWelsh": 22945, + "Luc": 22946, + "mith": 22947, + "Ġrugby": 22948, + "Ġrespiratory": 22949, + "Ġtriangle": 22950, + "Ġ215": 22951, + "Ġundergraduate": 22952, + "ĠSuperior": 22953, + 
"changing": 22954, + "_-": 22955, + "Ġrightly": 22956, + "Ġreferee": 22957, + "Ġlucrative": 22958, + "Ġunauthorized": 22959, + "Ġresembles": 22960, + "ĠGNU": 22961, + "ĠDerby": 22962, + "Ġpathways": 22963, + "ĠLed": 22964, + "Ġendurance": 22965, + "Ġstint": 22966, + "Ġcollector": 22967, + "Fast": 22968, + "Ġdots": 22969, + "Ġnationals": 22970, + "ĠSecurities": 22971, + "Ġwhip": 22972, + "Param": 22973, + "Ġlearns": 22974, + "Magic": 22975, + "Ġdetailing": 22976, + "moon": 22977, + "Ġbroadcasting": 22978, + "Ġbaked": 22979, + "265": 22980, + "holm": 22981, + "ĠSah": 22982, + "ĠHussein": 22983, + "ĠCourtesy": 22984, + "174": 22985, + "Ġ146": 22986, + "Ġgeographic": 22987, + "peace": 22988, + "Ġjudging": 22989, + "ĠStern": 22990, + "Bur": 22991, + "Ġstoryline": 22992, + "Gun": 22993, + "ĠStick": 22994, + "245": 22995, + "307": 22996, + "ãĤ´ãĥ³": 22997, + "ĠAdministrator": 22998, + "Ġburnt": 22999, + "Ġpave": 23000, + "choes": 23001, + "Exec": 23002, + "Ġcampuses": 23003, + "Result": 23004, + "Ġmutations": 23005, + "ĠCharter": 23006, + "Ġcaptures": 23007, + "Ġcompares": 23008, + "Ġbadge": 23009, + "Scient": 23010, + "Ġerad": 23011, + "iery": 23012, + "oi": 23013, + "ettes": 23014, + "ĠEstate": 23015, + "Ġstrap": 23016, + "Ġproudly": 23017, + "Ġfried": 23018, + "Ġwithdrawn": 23019, + "ĠVoy": 23020, + "phony": 23021, + "Items": 23022, + "ĠPierce": 23023, + "bard": 23024, + "Ġannotation": 23025, + "anton": 23026, + "illon": 23027, + "Impro": 23028, + "...)": 23029, + "Ġhappier": 23030, + "------": 23031, + "adjust": 23032, + "Ġstaffers": 23033, + "Ġactivism": 23034, + "Ġperf": 23035, + "Ġalright": 23036, + "Need": 23037, + "Ġcommence": 23038, + "Ġopioid": 23039, + "ĠAmanda": 23040, + "Es": 23041, + "ĠPars": 23042, + "ĠKaw": 23043, + "Works": 23044, + "248": 23045, + "Ġindo": 23046, + "tc": 23047, + "endant": 23048, + "ĠMoto": 23049, + "Ġlegalization": 23050, + "OTE": 23051, + "Ġtasked": 23052, + "Ġtsp": 23053, + "ĠACTIONS": 23054, + "166": 23055, + "Ġrefreshing": 23056, + "ĠNR": 23057, + "ĠPerez": 23058, + "Ġinfringement": 23059, + "SY": 23060, + "Listen": 23061, + "inning": 23062, + "ku": 23063, + "Ġrotate": 23064, + "program": 23065, + "arah": 23066, + "Design": 23067, + "Ġ(£": 23068, + "Ġstoring": 23069, + "Ġwarrants": 23070, + "Ġjudgement": 23071, + "ĠBrist": 23072, + "usually": 23073, + "photo": 23074, + "ĠRan": 23075, + "ĠPine": 23076, + "Ġoutrageous": 23077, + "ĠValentine": 23078, + "luence": 23079, + "ĠEverybody": 23080, + "Altern": 23081, + "Ġrelevance": 23082, + "Ġterminated": 23083, + "Ġdessert": 23084, + "Ġfulfilled": 23085, + "Ġprosecuted": 23086, + "ĠWords": 23087, + "Ġmigrant": 23088, + "Ġcultivation": 23089, + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ": 23090, + "idelity": 23091, + "ĠVern": 23092, + "ĠLogin": 23093, + "Ġmetaphor": 23094, + "ĠTip": 23095, + "Ġrecruits": 23096, + "ĠPig": 23097, + "ribing": 23098, + "Ġenthusiasts": 23099, + "exper": 23100, + "Ġfrightening": 23101, + "ĠHair": 23102, + "anson": 23103, + "strate": 23104, + "Ġhi": 23105, + "Height": 23106, + "Ġowning": 23107, + "none": 23108, + "Ġdislike": 23109, + "Ġknives": 23110, + "pherd": 23111, + "Ġloudly": 23112, + "ĠAPIs": 23113, + "Display": 23114, + "ĠLac": 23115, + "ĠUSS": 23116, + "abl": 23117, + "verages": 23118, + "Jew": 23119, + "Ġ172": 23120, + "ĠHistorical": 23121, + "atoon": 23122, + "ĠPhysics": 23123, + "intern": 23124, + "Ġwarmth": 23125, + "Ġtopp": 23126, + "DM": 23127, + "Ġgunman": 23128, + "Ġemperor": 23129, + "odi": 23130, + "ãĥ£": 23131, + "inatory": 23132, + "ĠRib": 
23133, + "Ġ131": 23134, + "ĠSaturn": 23135, + "ĠShining": 23136, + "Ġwaking": 23137, + "Quotes": 23138, + "Ġcomedian": 23139, + "enberg": 23140, + "½": 23141, + "Ġbelievers": 23142, + "Ġpaperwork": 23143, + "custom": 23144, + "Ġlev": 23145, + "Ġlament": 23146, + "Ġpouring": 23147, + "222": 23148, + "political": 23149, + "ĠSupplement": 23150, + "maid": 23151, + "Ġcruelty": 23152, + "Ġtread": 23153, + "ysics": 23154, + "Aw": 23155, + "rites": 23156, + "Ġmodifier": 23157, + "ĠPosition": 23158, + "Adam": 23159, + "lb": 23160, + "ubs": 23161, + "Ġimperfect": 23162, + "Ġclusters": 23163, + "ĠEngineer": 23164, + "ĠCherry": 23165, + "Ġinauguration": 23166, + "ĠSau": 23167, + "Ġembodiment": 23168, + "ĠUncle": 23169, + "Ġoverr": 23170, + "Ġexplosions": 23171, + "cule": 23172, + "ĠPrinceton": 23173, + "ĠAndrea": 23174, + "Ġincorrectly": 23175, + "Ġearnest": 23176, + "Ġpilgr": 23177, + "ĠSprint": 23178, + "Ġsleeve": 23179, + "Ġhears": 23180, + "ĠAmazing": 23181, + "Ġbrowsing": 23182, + "agin": 23183, + "Ġhomeland": 23184, + "Ġhaw": 23185, + "Ġdiving": 23186, + "istered": 23187, + "178": 23188, + "Ġbargaining": 23189, + "ĠArcade": 23190, + "Ġdelegate": 23191, + "terson": 23192, + "................................................................": 23193, + "ĠJacksonville": 23194, + "275": 23195, + "Ġstagn": 23196, + "Ġadam": 23197, + "ĠSherman": 23198, + "CB": 23199, + "Ġsuburb": 23200, + "ĠFoods": 23201, + "Ġconverting": 23202, + "ĠArist": 23203, + "Ġchambers": 23204, + "love": 23205, + "Ġamino": 23206, + "ĠGan": 23207, + "Ġmadness": 23208, + "mc": 23209, + "ĠUSE": 23210, + "defined": 23211, + "Ġultr": 23212, + "indust": 23213, + "Ġwolves": 23214, + "lance": 23215, + "Additionally": 23216, + "Ġcracks": 23217, + "asia": 23218, + "ĠReason": 23219, + "ĠPump": 23220, + "Ġaccidental": 23221, + "ĠLaser": 23222, + "ĠRid": 23223, + "Ġinitialized": 23224, + "elli": 23225, + "Ġunnamed": 23226, + "Ġnoun": 23227, + "ĠPassed": 23228, + "Ġhostage": 23229, + "ĠEthiop": 23230, + "shirts": 23231, + "Ġunrel": 23232, + "ĠEmbassy": 23233, + "Ġ1941": 23234, + "Ġatoms": 23235, + "Ġpurported": 23236, + "164": 23237, + "ĠFi": 23238, + "Ġgallons": 23239, + "ĠMonica": 23240, + "Ġpg": 23241, + "enment": 23242, + "Ġsorted": 23243, + "ĠGospel": 23244, + "Ġheights": 23245, + "Ġtraced": 23246, + "Ġundergoing": 23247, + "Shell": 23248, + "Ġsacks": 23249, + "Ġproportions": 23250, + "Ġhalluc": 23251, + "Font": 23252, + "acet": 23253, + "Ġwarmer": 23254, + "ĠINTER": 23255, + "Ġgrabbing": 23256, + "Plug": 23257, + "Ġrealization": 23258, + "ĠBurke": 23259, + "Ġenchant": 23260, + "ATER": 23261, + "ĠSeed": 23262, + "Ġabundant": 23263, + "FM": 23264, + "Ġcivic": 23265, + "Vs": 23266, + "isi": 23267, + "Ġvow": 23268, + "Ġreper": 23269, + "ĠPartnership": 23270, + "Ġpenetration": 23271, + "Ġaxe": 23272, + "Ġshattered": 23273, + "ĠZombies": 23274, + "Ġvinyl": 23275, + "ĠAlert": 23276, + "eon": 23277, + "Ġobliged": 23278, + "ĠIllust": 23279, + "ĠPlaza": 23280, + "ĠFrontier": 23281, + "Ġdavidjl": 23282, + "ĠSerial": 23283, + "ĠHav": 23284, + "ĠNutrition": 23285, + "Bi": 23286, + "ĠâĸĪ": 23287, + "ĠJays": 23288, + "linux": 23289, + "Ġhurry": 23290, + "Ġvoy": 23291, + "Ġhopeless": 23292, + "ĠStealth": 23293, + "Ġãģ": 23294, + "essors": 23295, + "ttle": 23296, + "borg": 23297, + "ĠSafari": 23298, + "fell": 23299, + "Ġwary": 23300, + "due": 23301, + "ĠAbove": 23302, + "Ha": 23303, + "ELL": 23304, + "Ġnotor": 23305, + "ĠWon": 23306, + "Too": 23307, + "Ġoccupations": 23308, + "Ġpossessions": 23309, + "Ġinviting": 23310, + "Ġpredators": 23311, + 
"Ġaccelerated": 23312, + "Ġ157": 23313, + "uterte": 23314, + "ĠCube": 23315, + "east": 23316, + "account": 23317, + "Give": 23318, + "Ġtransplant": 23319, + "redients": 23320, + "idable": 23321, + "Ġscreenshots": 23322, + "ĠGund": 23323, + "ĠFS": 23324, + "Ġtravelers": 23325, + "Ġsensory": 23326, + "ĠFiat": 23327, + "ĠRockets": 23328, + "İĭ": 23329, + "_{": 23330, + "Friend": 23331, + "Ġcharming": 23332, + "ALS": 23333, + "Ġenjoyment": 23334, + "mph": 23335, + "Ġ5000": 23336, + "ĠREG": 23337, + "ÙĨ": 23338, + "bia": 23339, + "Ġcompilation": 23340, + "rost": 23341, + "ĠVP": 23342, + "ĠSchne": 23343, + "2019": 23344, + "Ġcopying": 23345, + "MORE": 23346, + "ĠFlore": 23347, + "falls": 23348, + "215": 23349, + "total": 23350, + "Ġdisciples": 23351, + "double": 23352, + "Ġexceeding": 23353, + "Ġsmashed": 23354, + "Ġconceptual": 23355, + "ĠRomania": 23356, + "ĠBrent": 23357, + "ĠICE": 23358, + "ĠTou": 23359, + "Ġgrap": 23360, + "Ġnails": 23361, + "189": 23362, + "ãĥĺ": 23363, + "Ġprocure": 23364, + "eur": 23365, + "Ġconfirming": 23366, + "ĠCec": 23367, + "awi": 23368, + "ĠEden": 23369, + "Ġng": 23370, + "Ġengineered": 23371, + "atics": 23372, + "Ġhooked": 23373, + "Ġdisgusting": 23374, + "ĠMurder": 23375, + "ãĤ¿": 23376, + "Library": 23377, + "Ġ168": 23378, + "Almost": 23379, + "hematic": 23380, + "Menu": 23381, + "ĠNotre": 23382, + "ĠJur": 23383, + "Ġkidnapped": 23384, + "Ġhacker": 23385, + "ĠJade": 23386, + "Ġcreepy": 23387, + "Ġdrawings": 23388, + "ĠSponsor": 23389, + "Ġcyclists": 23390, + "ĠGoblin": 23391, + "Ġoptimized": 23392, + "Ġstaged": 23393, + "ĠMcD": 23394, + "between": 23395, + "Age": 23396, + "eno": 23397, + "Sex": 23398, + "ĠWide": 23399, + "nings": 23400, + "avis": 23401, + "Ġincapable": 23402, + "ĠKob": 23403, + "Ġrewarding": 23404, + "ĠLone": 23405, + "olescent": 23406, + "Ġcontracted": 23407, + "Ġsticky": 23408, + "Jose": 23409, + "Ball": 23410, + "fest": 23411, + "ĠInput": 23412, + "ĠRecently": 23413, + "Ġtomat": 23414, + "square": 23415, + "Application": 23416, + "Ġnitrogen": 23417, + "Ġduplicate": 23418, + "ĠRecon": 23419, + "ĠDear": 23420, + "London": 23421, + "Ġintra": 23422, + "Ġdock": 23423, + "Ġoutreach": 23424, + "ĠMillion": 23425, + "Ġmammals": 23426, + "ampton": 23427, + "VAL": 23428, + "Ġsnaps": 23429, + "Ġdos": 23430, + "ĠWhole": 23431, + "ĠReady": 23432, + "Try": 23433, + "ĠWinnipeg": 23434, + "earance": 23435, + "Ġincurred": 23436, + "renched": 23437, + "ĠNSW": 23438, + "ilot": 23439, + "raine": 23440, + "Ġcube": 23441, + "got": 23442, + "Ġrunway": 23443, + "etermined": 23444, + "ĠHawks": 23445, + "Ġsurvivor": 23446, + "ĠWish": 23447, + "ĠDin": 23448, + "ĠDEF": 23449, + "ĠVault": 23450, + "187": 23451, + "Ġmushrooms": 23452, + "Ġcrisp": 23453, + "bey": 23454, + "ĠDiscovery": 23455, + "Ġdevelopmental": 23456, + "Ġparadigm": 23457, + "Ġchaotic": 23458, + "ĠTsu": 23459, + "Ġ333": 23460, + "bons": 23461, + "Ġbacterial": 23462, + "Ġcommits": 23463, + "Ġcosmic": 23464, + "Ġmega": 23465, + "ocative": 23466, + "ĠPaint": 23467, + "ophobic": 23468, + "Ġvain": 23469, + "Ġcarved": 23470, + "ĠThief": 23471, + "ĠGul": 23472, + "owship": 23473, + "Ġcites": 23474, + "ĠEdinburgh": 23475, + "Ġdiminished": 23476, + "Ġacknowledges": 23477, + "ĠKills": 23478, + "Ġmicrow": 23479, + "ĠHera": 23480, + "Ġseniors": 23481, + "Ġwhereby": 23482, + "Hop": 23483, + "atron": 23484, + "Ġunavailable": 23485, + "ĠNate": 23486, + "Ġ480": 23487, + "Ġslated": 23488, + "ĠRebecca": 23489, + "ĠBattery": 23490, + "Ġgrammar": 23491, + "Ġheadset": 23492, + "Ġcursor": 23493, + "Ġexcluding": 23494, + 
"anye": 23495, + "aundering": 23496, + "ebin": 23497, + "Ġfeasible": 23498, + "ĠPublishing": 23499, + "ĠLabs": 23500, + "ĠCliff": 23501, + "ĠFerrari": 23502, + "Ġpac": 23503, + "visible": 23504, + "marked": 23505, + "pell": 23506, + "Ġpolite": 23507, + "Ġstaggering": 23508, + "ĠGalactic": 23509, + "Ġsuperst": 23510, + "Ġparan": 23511, + "ĠOfficers": 23512, + "ãĢģ": 23513, + "Ġspecifics": 23514, + "ulus": 23515, + "239": 23516, + "ĠPaste": 23517, + "AMP": 23518, + "ĠPanama": 23519, + "ĠDelete": 23520, + "anguard": 23521, + "restrial": 23522, + "Ġheroic": 23523, + "ĠDy": 23524, + "اÙĦ": 23525, + "Ġincumbent": 23526, + "Ġcrunch": 23527, + "tro": 23528, + "Ġscoop": 23529, + "Ġblogger": 23530, + "Ġsellers": 23531, + "uren": 23532, + "Ġmedicines": 23533, + "ĠCaps": 23534, + "ĠAnimation": 23535, + "oxy": 23536, + "Ġoutward": 23537, + "Ġinquiries": 23538, + "229": 23539, + "Ġpsychologist": 23540, + "ĠSask": 23541, + "evil": 23542, + "Ġcontaminated": 23543, + "ãĤ¨": 23544, + "herence": 23545, + "Ġbranded": 23546, + "ĠAbdul": 23547, + "zh": 23548, + "Ġparagraphs": 23549, + "Ġmins": 23550, + "Ġcorrelated": 23551, + "erb": 23552, + "Ġimpart": 23553, + "Ġmilestone": 23554, + "ĠSolutions": 23555, + "otle": 23556, + "Ġundercover": 23557, + "Ġmarched": 23558, + "ĠChargers": 23559, + "fax": 23560, + "ĠSecrets": 23561, + "Ġruth": 23562, + "weather": 23563, + "Ġfeminine": 23564, + "Ġsham": 23565, + "Ġprestigious": 23566, + "iggins": 23567, + "Ġsung": 23568, + "history": 23569, + "ettle": 23570, + "ggie": 23571, + "Ġoutdated": 23572, + "oland": 23573, + "Ġperceptions": 23574, + "ĠSession": 23575, + "ĠDodgers": 23576, + "uj": 23577, + "ĠEND": 23578, + "Doc": 23579, + "Ġdeficiency": 23580, + "Grand": 23581, + "ĠJoker": 23582, + "Ġretrospect": 23583, + "Ġdiagnostic": 23584, + "Ġharmless": 23585, + "Ġrogue": 23586, + "ĠAval": 23587, + "Equ": 23588, + "Ġtransc": 23589, + "ĠRobertson": 23590, + "ĠDepending": 23591, + "ĠBurns": 23592, + "ivo": 23593, + "Ġhostility": 23594, + "Features": 23595, + "ĵĺ": 23596, + "Ġdiscomfort": 23597, + "ĠLCD": 23598, + "specified": 23599, + "ĠExpect": 23600, + "340": 23601, + "Ġimperative": 23602, + "ĠRegular": 23603, + "Chinese": 23604, + "Ġstatewide": 23605, + "Ġsymm": 23606, + "Ġloops": 23607, + "Ġautumn": 23608, + "Nick": 23609, + "Ġshaping": 23610, + "Ġquot": 23611, + "Ġcherry": 23612, + "ĠCrossref": 23613, + "è¦ļéĨĴ": 23614, + "Standard": 23615, + "heed": 23616, + "ĠDell": 23617, + "ĠVietnamese": 23618, + "Ġost": 23619, + "ĠValkyrie": 23620, + "OA": 23621, + "Assad": 23622, + "Ġrebound": 23623, + "ĠTraffic": 23624, + "places": 23625, + "æĺ": 23626, + "ĠBuc": 23627, + "172": 23628, + "Ġshelters": 23629, + "Ġinsisting": 23630, + "ĠCertainly": 23631, + "ĠKenneth": 23632, + "ĠTCP": 23633, + "Ġpenal": 23634, + "ĠReplay": 23635, + "heard": 23636, + "Ġdialect": 23637, + "iza": 23638, + "ĠFY": 23639, + "itcher": 23640, + "ĠDL": 23641, + "Ġspiral": 23642, + "Ġquarterbacks": 23643, + "Ġhull": 23644, + "Ġgoogle": 23645, + "Ġtodd": 23646, + "ĠSterling": 23647, + "ĠPlate": 23648, + "Ġspying": 23649, + "mbol": 23650, + "ĠRealm": 23651, + "ĠProced": 23652, + "ĠCrash": 23653, + "Ġterminate": 23654, + "Ġprotesting": 23655, + "Center": 23656, + "guided": 23657, + "Ġuncover": 23658, + "Ġboycott": 23659, + "Ġrealizes": 23660, + "sound": 23661, + "Ġpretending": 23662, + "ĠVas": 23663, + "1980": 23664, + "Ġframed": 23665, + "Ġ139": 23666, + "Ġdescended": 23667, + "Ġrehabilitation": 23668, + "Ġborrowing": 23669, + "ĠBuch": 23670, + "Ġblur": 23671, + "Ron": 23672, + "ĠFrozen": 23673, + "enza": 23674, + 
"Chief": 23675, + "ĠPoor": 23676, + "Ġtranslates": 23677, + "MIN": 23678, + "Ġ212": 23679, + "JECT": 23680, + "Ġerupted": 23681, + "Ġsuccesses": 23682, + "SEC": 23683, + "Ġplague": 23684, + "Ġgems": 23685, + "doms": 23686, + "Ġstretches": 23687, + "ĠSpy": 23688, + "Ġstorytelling": 23689, + "Credit": 23690, + "ĠPush": 23691, + "Ġtraction": 23692, + "Ġineffective": 23693, + "ĠLuna": 23694, + "Ġtapes": 23695, + "Ġanalytics": 23696, + "ercise": 23697, + "Ġprogrammes": 23698, + "ĠCarbon": 23699, + "Ġbehold": 23700, + "heavy": 23701, + "ĠConservation": 23702, + "ĠFIR": 23703, + "Ġsack": 23704, + "termin": 23705, + "ricks": 23706, + "Ġhoused": 23707, + "Ġunusually": 23708, + "Ice": 23709, + "Ġexecuting": 23710, + "ĠMoroc": 23711, + "eday": 23712, + "Ġeditions": 23713, + "Ġsmarter": 23714, + "ĠBA": 23715, + "Ġoutlaw": 23716, + "Ġvanished": 23717, + "iba": 23718, + "ALSE": 23719, + "ĠSilva": 23720, + "238": 23721, + "Could": 23722, + "Ġphilosopher": 23723, + "Ġevacuated": 23724, + "Secret": 23725, + "142": 23726, + "Ġvisas": 23727, + "ãĤ¬": 23728, + "ĠMalt": 23729, + "ĠClearly": 23730, + "ĠNiger": 23731, + "ĠCairo": 23732, + "ĠFist": 23733, + "380": 23734, + "ĠXML": 23735, + "auto": 23736, + "itant": 23737, + "Ġreinforced": 23738, + "Record": 23739, + "ĠSurvivor": 23740, + "GHz": 23741, + "Ġscrews": 23742, + "parents": 23743, + "Ġoceans": 23744, + "mares": 23745, + "Ġbrakes": 23746, + "vasive": 23747, + "Ġhello": 23748, + "ĠSIM": 23749, + "rimp": 23750, + "Ġore": 23751, + "ĠArmour": 23752, + "247": 23753, + "Ġterrific": 23754, + "Ġtones": 23755, + "141": 23756, + "ĠMinutes": 23757, + "Episode": 23758, + "Ġcurves": 23759, + "Ġinflammatory": 23760, + "Ġbatting": 23761, + "ĠBeautiful": 23762, + "Lay": 23763, + "Ġunpop": 23764, + "vable": 23765, + "Ġriots": 23766, + "ĠTactics": 23767, + "baugh": 23768, + "ĠCock": 23769, + "Ġorgasm": 23770, + "ĠSas": 23771, + "Ġconstructor": 23772, + "etz": 23773, + "Gov": 23774, + "Ġantagon": 23775, + "Ġtheat": 23776, + "Ġdeeds": 23777, + "hao": 23778, + "cuts": 23779, + "ĠMcCl": 23780, + "Ġum": 23781, + "ĠScientists": 23782, + "Ġgrassroots": 23783, + "yssey": 23784, + "\"]=>": 23785, + "Ġsurfaced": 23786, + "Ġshades": 23787, + "Ġneighbours": 23788, + "Ġadvertis": 23789, + "oya": 23790, + "Ġmerged": 23791, + "Upon": 23792, + "Ġgad": 23793, + "Ġanticipate": 23794, + "Anyway": 23795, + "Ġslogan": 23796, + "Ġdisrespect": 23797, + "Iran": 23798, + "ĠTB": 23799, + "acted": 23800, + "Ġsubpoen": 23801, + "mediately": 23802, + "OOOO": 23803, + "Ġwaiver": 23804, + "Ġvulnerabilities": 23805, + "ottesville": 23806, + "ĠHuffington": 23807, + "Josh": 23808, + "ĠDH": 23809, + "Monday": 23810, + "ĠEllen": 23811, + "Know": 23812, + "xon": 23813, + "items": 23814, + "228": 23815, + "Ġfills": 23816, + "ĠNike": 23817, + "Ġcumulative": 23818, + "andals": 23819, + "Ir": 23820, + "Ġì": 23821, + "Ġfriction": 23822, + "igator": 23823, + "Ġscans": 23824, + "ĠVienna": 23825, + "ldom": 23826, + "Ġperformers": 23827, + "Prim": 23828, + "Ġbidding": 23829, + "Mur": 23830, + "Ġleaned": 23831, + "ĠPrix": 23832, + "alks": 23833, + "Ġ[âĢ¦]": 23834, + "ĠTwitch": 23835, + "ĠDeveloper": 23836, + "ĠGir": 23837, + "Ġcallback": 23838, + "Abstract": 23839, + "Ġaccustomed": 23840, + "Ġfreedoms": 23841, + "ĠPG": 23842, + "uracy": 23843, + "Ġlump": 23844, + "isman": 23845, + ",,,,": 23846, + "1992": 23847, + "ĠRED": 23848, + "Ġworm": 23849, + "Match": 23850, + "ĠPlatinum": 23851, + "IJ": 23852, + "ĠOwner": 23853, + "Trivia": 23854, + "compl": 23855, + "Ġnewborn": 23856, + "Ġfantas": 23857, + "Own": 23858, + 
"Ġ1959": 23859, + "Ġsympath": 23860, + "Ġubiqu": 23861, + "Ġoutputs": 23862, + "Ġallev": 23863, + "Ġprag": 23864, + "Kevin": 23865, + "Ġfavors": 23866, + "Ġburial": 23867, + "Ġnurt": 23868, + "solete": 23869, + "cache": 23870, + "Ġ156": 23871, + "Ġunlocks": 23872, + "techn": 23873, + "Making": 23874, + "Ġconquer": 23875, + "adic": 23876, + "æĸ": 23877, + "Ġelf": 23878, + "Ġelectorate": 23879, + "ĠKurds": 23880, + "ĠStack": 23881, + "ĠSamurai": 23882, + "Ġâĺħ": 23883, + "Ġ{}": 23884, + "ĠSaid": 23885, + "ĠFallout": 23886, + "Ġkindness": 23887, + "ĠCustoms": 23888, + "ĠBoulevard": 23889, + "Ġhelicopters": 23890, + "otics": 23891, + "ĠVeget": 23892, + "comment": 23893, + "Ġcriticised": 23894, + "Ġpolished": 23895, + "ĠRemix": 23896, + "ĠCultural": 23897, + "Ġrecons": 23898, + "Ġdoi": 23899, + "atem": 23900, + "Screen": 23901, + "Ġbarred": 23902, + "Comments": 23903, + "ĠGenerally": 23904, + "Ġslap": 23905, + "720": 23906, + "Vari": 23907, + "pine": 23908, + "Ġempt": 23909, + "Ġhats": 23910, + "ĠPlaying": 23911, + "lab": 23912, + "average": 23913, + "forms": 23914, + "ĠCotton": 23915, + "Ġcans": 23916, + "ĠDON": 23917, + "ĠSomalia": 23918, + "Crypt": 23919, + "ĠIncreases": 23920, + "Ever": 23921, + "modern": 23922, + "Ġsurgeon": 23923, + "3000": 23924, + "Ġrandomized": 23925, + "================================================================": 23926, + "Bern": 23927, + "impl": 23928, + "ĠCOR": 23929, + "Ġproclaim": 23930, + "thouse": 23931, + "Ġtoes": 23932, + "Ġample": 23933, + "Ġpreserving": 23934, + "Ġdisbel": 23935, + "grand": 23936, + "Besides": 23937, + "Ġsilk": 23938, + "ĠPattern": 23939, + "hm": 23940, + "Ġenterprises": 23941, + "Ġaffidavit": 23942, + "ĠAdvisory": 23943, + "Ġadvertised": 23944, + "ĠReligious": 23945, + "sections": 23946, + "psych": 23947, + "ĠFields": 23948, + "aways": 23949, + "Ġhashtag": 23950, + "ĠNightmare": 23951, + "Ġvampire": 23952, + "Ġforensic": 23953, + "rossover": 23954, + "nar": 23955, + "Ġnavy": 23956, + "Ġvacant": 23957, + "ĠDuel": 23958, + "Ġhallway": 23959, + "Ġfacebook": 23960, + "identally": 23961, + "ĠNRA": 23962, + "Ġmatt": 23963, + "Ġhurricane": 23964, + "ĠKirby": 23965, + "ĠPuzzle": 23966, + "Ġskirt": 23967, + "oust": 23968, + "dullah": 23969, + "Ġanalogy": 23970, + "inion": 23971, + "Ġtomatoes": 23972, + "ĠNV": 23973, + "ĠPeak": 23974, + "ĠMeyer": 23975, + "Ġappointments": 23976, + "Ġmasc": 23977, + "Ġalley": 23978, + "rehend": 23979, + "Ġcharities": 23980, + "Ġundo": 23981, + "Ġdestinations": 23982, + "ĠTesting": 23983, + "\">\"": 24618, + "cats": 24619, + "*.": 24620, + "Ġgestures": 24621, + "general": 24622, + "League": 24623, + "Ġpackets": 24624, + "ĠInspector": 24625, + "ĠBerg": 24626, + "Ġfraudulent": 24627, + "Ġcriticize": 24628, + "Fun": 24629, + "Ġblaming": 24630, + "ndra": 24631, + "Ġslash": 24632, + "ĠEston": 24633, + "Ġproposing": 24634, + "Ġwhales": 24635, + "Ġtherapist": 24636, + "Ġsubset": 24637, + "Ġleisure": 24638, + "ELD": 24639, + "ĠCVE": 24640, + "ĠActivity": 24641, + "Ġculmin": 24642, + "shop": 24643, + "ĠDAY": 24644, + "ischer": 24645, + "ĠAdmiral": 24646, + "ĠAttacks": 24647, + "Ġ1958": 24648, + "Ġmemoir": 24649, + "Ġfolded": 24650, + "Ġsexist": 24651, + "Ġ153": 24652, + "ĠLI": 24653, + "Ġreadings": 24654, + "Ġembarrassment": 24655, + "ĠEmployment": 24656, + "wart": 24657, + "chin": 24658, + "Ġcontinuation": 24659, + "lia": 24660, + "Recently": 24661, + "Ġduel": 24662, + "Ġevacuation": 24663, + "ĠKashmir": 24664, + "Ġdisposition": 24665, + "ĠRig": 24666, + "Ġbolts": 24667, + "Ġinsurers": 24668, + "467": 24669, + "Mex": 
24670, + "Ġretaliation": 24671, + "Ġmisery": 24672, + "Ġunreasonable": 24673, + "raining": 24674, + "Imm": 24675, + "ĠPU": 24676, + "emer": 24677, + "Ġgenital": 24678, + "ãĤ³": 24679, + "ĠCandy": 24680, + "Ġonions": 24681, + "ĠPatt": 24682, + "liner": 24683, + "Ġconceded": 24684, + "Ġfa": 24685, + "Ġforc": 24686, + "ĠHernandez": 24687, + "ĠGeoff": 24688, + "debian": 24689, + "ĠTeams": 24690, + "Ġcries": 24691, + "Ġhomeowners": 24692, + "237": 24693, + "ABC": 24694, + "Ġstitch": 24695, + "Ġstatistic": 24696, + "Ġheaders": 24697, + "ĠBiology": 24698, + "Ġmotors": 24699, + "ĠGEN": 24700, + "ĠLip": 24701, + "Ġhates": 24702, + "Ġheel": 24703, + "Self": 24704, + "ipl": 24705, + "EDIT": 24706, + "orting": 24707, + "Ġannot": 24708, + "ĠSpeech": 24709, + "oldemort": 24710, + "ĠJavascript": 24711, + "ĠLeBron": 24712, + "Ġfootprint": 24713, + "Ġfn": 24714, + "Ġseizures": 24715, + "nas": 24716, + "hide": 24717, + "Ġ1954": 24718, + "ĠBee": 24719, + "ĠDeclaration": 24720, + "ĠKatie": 24721, + "Ġreservations": 24722, + "NR": 24723, + "female": 24724, + "Ġsaturated": 24725, + "Ġbiblical": 24726, + "Ġtrolls": 24727, + "Device": 24728, + "photos": 24729, + "Ġdrums": 24730, + "ãĥīãĥ©ãĤ´ãĥ³": 24731, + "Night": 24732, + "fighter": 24733, + "ĠHak": 24734, + "riber": 24735, + "Ġcush": 24736, + "Ġdisciplinary": 24737, + "baum": 24738, + "ĠGH": 24739, + "ĠSchmidt": 24740, + "ilibrium": 24741, + "Ġsixty": 24742, + "ĠKushner": 24743, + "rots": 24744, + "Ġpund": 24745, + "ĠRac": 24746, + "Ġsprings": 24747, + "Ġconve": 24748, + "Business": 24749, + "Fall": 24750, + "Ġqualifications": 24751, + "Ġverses": 24752, + "Ġnarciss": 24753, + "ĠKoh": 24754, + "ĠWow": 24755, + "ĠCharlottesville": 24756, + "edo": 24757, + "Ġinterrogation": 24758, + "ĠWool": 24759, + "365": 24760, + "Brian": 24761, + "Ġâľĵ": 24762, + "Ġalleges": 24763, + "onds": 24764, + "idation": 24765, + "ĠJackie": 24766, + "yu": 24767, + "Ġlakes": 24768, + "Ġworthwhile": 24769, + "Ġcrystals": 24770, + "ĠJuda": 24771, + "Ġcomprehend": 24772, + "Ġflush": 24773, + "Ġabsorption": 24774, + "ĠOC": 24775, + "Ġfrightened": 24776, + "ĠChocolate": 24777, + "Martin": 24778, + "Ġbuys": 24779, + "Ġbucks": 24780, + "Ġappell": 24781, + "ĠChampionships": 24782, + "Ġlistener": 24783, + "ĠDefensive": 24784, + "Ġcz": 24785, + "uds": 24786, + "ĠMate": 24787, + "Ġreplay": 24788, + "Ġdecorated": 24789, + "Ġsunk": 24790, + "ĠVIP": 24791, + "ĠAnk": 24792, + "Ġ195": 24793, + "aaaa": 24794, + "Nobody": 24795, + "ĠMilk": 24796, + "ĠGur": 24797, + "ĠMk": 24798, + "ĠSara": 24799, + "Ġseating": 24800, + "ĠWid": 24801, + "Track": 24802, + "Ġemploys": 24803, + "Ġgigantic": 24804, + "APP": 24805, + "ãĤ§": 24806, + "inventory": 24807, + "Ġtowel": 24808, + "atche": 24809, + "lasting": 24810, + "ĠTL": 24811, + "Ġlatency": 24812, + "Ġkne": 24813, + "Ber": 24814, + "meaning": 24815, + "Ġupheld": 24816, + "Ġplayground": 24817, + "Ġmant": 24818, + "Side": 24819, + "Ġstereo": 24820, + "Ġnorthwest": 24821, + "Ġexceptionally": 24822, + "Ġrays": 24823, + "Ġrecurring": 24824, + "Drive": 24825, + "Ġupright": 24826, + "Ġabduct": 24827, + "ĠMarathon": 24828, + "Ġgoodbye": 24829, + "Ġalphabet": 24830, + "hp": 24831, + "Ġcourtroom": 24832, + "rington": 24833, + "othing": 24834, + "Tag": 24835, + "Ġdiplomats": 24836, + "Ġbarbar": 24837, + "ĠAqua": 24838, + "183": 24839, + "3333": 24840, + "Ġmaturity": 24841, + "Ġinstability": 24842, + "ĠApache": 24843, + "Ġ===": 24844, + "Ġfasting": 24845, + "ĠGrid": 24846, + "ModLoader": 24847, + "Ġ152": 24848, + "Abs": 24849, + "ĠOperating": 24850, + "etti": 24851, + 
"Ġacquaint": 24852, + "Donnell": 24853, + "ĠKem": 24854, + "ĠForge": 24855, + "Ġarmored": 24856, + "Mil": 24857, + "Ġphilosophers": 24858, + "invest": 24859, + "Players": 24860, + "âĪ": 24861, + "Ġmyriad": 24862, + "Ġcomrades": 24863, + "Rot": 24864, + "Ġremembering": 24865, + "Ġcorresponds": 24866, + "Ġprogrammers": 24867, + "ĠLynn": 24868, + "Ġolig": 24869, + "Ġcoherent": 24870, + "ynchron": 24871, + "ĠChemical": 24872, + "Ġjugg": 24873, + "pair": 24874, + "posts": 24875, + "Eye": 24876, + "ĠInner": 24877, + "Ġsemester": 24878, + "ottest": 24879, + "ĠEmirates": 24880, + "ricanes": 24881, + "orously": 24882, + "mits": 24883, + "ĠWis": 24884, + "Ġdodge": 24885, + "location": 24886, + "Ġfaded": 24887, + "Amazon": 24888, + "ĠProceed": 24889, + "ĠINFO": 24890, + "journal": 24891, + "ĠTruck": 24892, + "Ten": 24893, + "Ġ217": 24894, + "Ġstatutes": 24895, + "mobile": 24896, + "ĠTypes": 24897, + "Recomm": 24898, + "buster": 24899, + "pex": 24900, + "Ġlegends": 24901, + "Ġheadache": 24902, + "faced": 24903, + "ĠWiFi": 24904, + "ifty": 24905, + "ĠHER": 24906, + "Ġcircuits": 24907, + "ERROR": 24908, + "226": 24909, + "olin": 24910, + "Ġcylinder": 24911, + "ospace": 24912, + "ikers": 24913, + "Prem": 24914, + "Quant": 24915, + "Ġconflicting": 24916, + "Ġslightest": 24917, + "Ġforged": 24918, + "ionage": 24919, + "Stephen": 24920, + "ĠKub": 24921, + "ĠOpportun": 24922, + "ĠHeal": 24923, + "Ġblo": 24924, + "Ġrulers": 24925, + "Ġhuh": 24926, + "Ġsubmarine": 24927, + "fy": 24928, + "asser": 24929, + "Ġallowance": 24930, + "ĠKasich": 24931, + "ĠTas": 24932, + "ĠAustralians": 24933, + "ForgeModLoader": 24934, + "ĠâĨij": 24935, + "ĠMatrix": 24936, + "amins": 24937, + "Ġ1200": 24938, + "ĠAcqu": 24939, + "236": 24940, + "Document": 24941, + "ĠBreaking": 24942, + "193": 24943, + "ĠSubst": 24944, + "ĠRoller": 24945, + "ĠProperties": 24946, + "ĠNI": 24947, + "tier": 24948, + "Ġcrushing": 24949, + "Ġadvocating": 24950, + "Furthermore": 24951, + "keepers": 24952, + "Ġsexism": 24953, + "xd": 24954, + "Ġcaller": 24955, + "ĠSense": 24956, + "chieve": 24957, + "ĠTF": 24958, + "Ġfueled": 24959, + "Ġreminiscent": 24960, + "Ġobsess": 24961, + "urst": 24962, + "Ġuphold": 24963, + "ĠFans": 24964, + "hetics": 24965, + "ĠâĹ": 24966, + "ĠBath": 24967, + "Ġbeverage": 24968, + "Ġoscill": 24969, + "254": 24970, + "Ġpoles": 24971, + "Ġgradual": 24972, + "Ġexting": 24973, + "ĠSuff": 24974, + "ĠSuddenly": 24975, + "Ġliking": 24976, + "Ġ1949": 24977, + "unciation": 24978, + "amination": 24979, + "ĠOmar": 24980, + "ĠLV": 24981, + "ĠConsequently": 24982, + "Ġsynthes": 24983, + "ĠGIF": 24984, + "Ġpains": 24985, + "Ġinteracting": 24986, + "uously": 24987, + "incre": 24988, + "Ġrumor": 24989, + "ĠScientology": 24990, + "197": 24991, + "ĠZig": 24992, + "Ġspelling": 24993, + "ĠASS": 24994, + "Ġextingu": 24995, + "mson": 24996, + "Ġgh": 24997, + "Ġremarked": 24998, + "ĠStrategic": 24999, + "ĠMON": 25000, + "å¥": 25001, + "gae": 25002, + "ĠWHAT": 25003, + "Eric": 25004, + "ĠCampus": 25005, + "Ġmethane": 25006, + "Ġimagin": 25007, + "JUST": 25008, + "ĠAlm": 25009, + "XT": 25010, + "iq": 25011, + "ĠRSS": 25012, + "Ġwrongdoing": 25013, + "atta": 25014, + "Ġbigot": 25015, + "Ġdemonstrators": 25016, + "ĠCalvin": 25017, + "ĠVilla": 25018, + "Ġmembrane": 25019, + "ĠAwesome": 25020, + "Ġbenefic": 25021, + "268": 25022, + "Ġmagnificent": 25023, + "ĠLots": 25024, + "Greg": 25025, + "ĠBoris": 25026, + "Ġdetainees": 25027, + "ĠHerman": 25028, + "Ġwhispered": 25029, + "Ġawe": 25030, + "Professor": 25031, + "funding": 25032, + "Ġphysiological": 25033, + 
"ĠDestruction": 25034, + "Ġlimb": 25035, + "Ġmanipulated": 25036, + "Ġbubbles": 25037, + "Ġpseud": 25038, + "Ġhydra": 25039, + "ĠBristol": 25040, + "Ġstellar": 25041, + "ĠExpansion": 25042, + "ĠKell": 25043, + "ĠInterestingly": 25044, + "Ġmans": 25045, + "Ġdragging": 25046, + "Ġecological": 25047, + "ĠFit": 25048, + "Ġgent": 25049, + "Ġbenefited": 25050, + "ĠHaiti": 25051, + "Ġpolyg": 25052, + "ãĥİ": 25053, + "Ġ2030": 25054, + "Ġprow": 25055, + "Ġreconstruction": 25056, + "Ġwast": 25057, + "Ġpsychic": 25058, + "ĠGreeks": 25059, + "Handler": 25060, + "162": 25061, + "ĠPulse": 25062, + "Ġsolicit": 25063, + "Ġsys": 25064, + "Ġinflux": 25065, + "ĠGentle": 25066, + "percent": 25067, + "Ġproliferation": 25068, + "Ġtaxable": 25069, + "Ġdisregard": 25070, + "Ġescaping": 25071, + "Ġginger": 25072, + "Ġwithstand": 25073, + "Ġdevastated": 25074, + "ĠDew": 25075, + "series": 25076, + "Ġinjected": 25077, + "elaide": 25078, + "Ġturnover": 25079, + "heat": 25080, + "ĻĤ": 25081, + "Happy": 25082, + "ĠSilent": 25083, + "ãĤŃ": 25084, + "ivism": 25085, + "Ġirrational": 25086, + "AMA": 25087, + "Ġreef": 25088, + "rub": 25089, + "Ġ162": 25090, + "Ġbankers": 25091, + "ĠEthics": 25092, + "vv": 25093, + "Ġcriticisms": 25094, + "Kn": 25095, + "186": 25096, + "Movie": 25097, + "ĠTories": 25098, + "Ġnood": 25099, + "Ġdistortion": 25100, + "False": 25101, + "odore": 25102, + "Ġtasty": 25103, + "Research": 25104, + "ĠUID": 25105, + "-)": 25106, + "Ġdivorced": 25107, + "ĠMU": 25108, + "ĠHayes": 25109, + "ĠIsn": 25110, + "iani": 25111, + "ĠHQ": 25112, + "Ġ\"#": 25113, + "ignant": 25114, + "Ġtraumatic": 25115, + "ĠLing": 25116, + "Hun": 25117, + "Ġsabot": 25118, + "online": 25119, + "random": 25120, + "Ġrenamed": 25121, + "rared": 25122, + "KA": 25123, + "dead": 25124, + "ét": 25125, + "ĠAssistance": 25126, + "Ġseaf": 25127, + "++++++++": 25128, + "Ġseldom": 25129, + "ĠWebb": 25130, + "Ġboolean": 25131, + "ulet": 25132, + "Ġrefrain": 25133, + "ĠDIY": 25134, + "rule": 25135, + "Ġshutting": 25136, + "Ġutilizing": 25137, + "loading": 25138, + "ĠParam": 25139, + "coal": 25140, + "ooter": 25141, + "Ġattracting": 25142, + "ĠDol": 25143, + "Ġhers": 25144, + "agnetic": 25145, + "ĠReach": 25146, + "imo": 25147, + "Ġdiscarded": 25148, + "ĠPip": 25149, + "015": 25150, + "ür": 25151, + "Ġmug": 25152, + "Imagine": 25153, + "COL": 25154, + "Ġcursed": 25155, + "ĠShows": 25156, + "ĠCurtis": 25157, + "ĠSachs": 25158, + "speaking": 25159, + "ĠVista": 25160, + "ĠFramework": 25161, + "ongo": 25162, + "Ġsubreddit": 25163, + "Ġcrus": 25164, + "ĠOval": 25165, + "Row": 25166, + "growing": 25167, + "Ġinstallment": 25168, + "Ġglac": 25169, + "ĠAdvance": 25170, + "ECK": 25171, + "ĠLGBTQ": 25172, + "LEY": 25173, + "Ġacet": 25174, + "Ġsuccessive": 25175, + "ĠNicole": 25176, + "Ġ1957": 25177, + "Quote": 25178, + "Ġcircumstance": 25179, + "ackets": 25180, + "Ġ142": 25181, + "ortium": 25182, + "Ġguessed": 25183, + "ĠFrame": 25184, + "Ġperpetrators": 25185, + "ĠAviation": 25186, + "ĠBench": 25187, + "Ġhandc": 25188, + "Ap": 25189, + "Ġ1956": 25190, + "259": 25191, + "rand": 25192, + "NetMessage": 25193, + "din": 25194, + "urtles": 25195, + "hig": 25196, + "ĠVIII": 25197, + "ffiti": 25198, + "ĠSwords": 25199, + "bial": 25200, + "Ġkidnapping": 25201, + "device": 25202, + "Ġbarn": 25203, + "ĠEli": 25204, + "aucas": 25205, + "Send": 25206, + "Constructed": 25207, + "Ġ½": 25208, + "Ġneedles": 25209, + "Ġadvertisements": 25210, + "Ġvou": 25211, + "Ġexhibited": 25212, + "ĠFortress": 25213, + "Ask": 25214, + "Berry": 25215, + "TYPE": 25216, + "Ġcancers": 25217, 
+ "umping": 25218, + "ĠTerritory": 25219, + "Ġprud": 25220, + "Ġnas": 25221, + "Ġatheist": 25222, + "Ġbalances": 25223, + "ãģŁ": 25224, + "ĠShawn": 25225, + "&&": 25226, + "Ġlandsc": 25227, + "ĠRGB": 25228, + "Ġpetty": 25229, + "Ġexcellence": 25230, + "Ġtranslations": 25231, + "Ġparcel": 25232, + "ĠChev": 25233, + "East": 25234, + "ĠOutput": 25235, + "imi": 25236, + "Ġambient": 25237, + "ĠThreat": 25238, + "Ġvillains": 25239, + "Ġ550": 25240, + "ICA": 25241, + "Ġtaller": 25242, + "Ġleaking": 25243, + "cup": 25244, + "Ġpolish": 25245, + "Ġinfectious": 25246, + "ĠKC": 25247, + "Ġ@@": 25248, + "background": 25249, + "Ġbureaucracy": 25250, + "ĠSai": 25251, + "unless": 25252, + "itious": 25253, + "ĠSkype": 25254, + "Atl": 25255, + "IDENT": 25256, + "008": 25257, + "Ġhypocr": 25258, + "Ġpitchers": 25259, + "Ġguessing": 25260, + "ĠFINAL": 25261, + "Between": 25262, + "Ġvillagers": 25263, + "Ġ252": 25264, + "fashion": 25265, + "ĠTunis": 25266, + "Beh": 25267, + "ĠExc": 25268, + "ĠMID": 25269, + "288": 25270, + "ĠHaskell": 25271, + "196": 25272, + "ĠNOR": 25273, + "Ġspecs": 25274, + "Ġinvari": 25275, + "Ġglut": 25276, + "ĠCars": 25277, + "Ġimpulse": 25278, + "Ġhonors": 25279, + "gel": 25280, + "Ġjurisdictions": 25281, + "ĠBundle": 25282, + "ulas": 25283, + "California": 25284, + "ĠIncrease": 25285, + "Ġpear": 25286, + "Ġsingles": 25287, + "Ġcues": 25288, + "Ġunderwent": 25289, + "ĠWS": 25290, + "Ġexaggerated": 25291, + "Ġdubious": 25292, + "Ġflashing": 25293, + "LOG": 25294, + ")].": 25295, + "Journal": 25296, + "tg": 25297, + "Van": 25298, + "ĠIstanbul": 25299, + "ĠInsp": 25300, + "ĠFranken": 25301, + "Draw": 25302, + "Ġsadness": 25303, + "Ġironic": 25304, + "ĠFry": 25305, + "xc": 25306, + "Ġ164": 25307, + "isch": 25308, + "Way": 25309, + "ĠProtestant": 25310, + "horn": 25311, + "Ġunaff": 25312, + "ĠViv": 25313, + "illas": 25314, + "ĠProductions": 25315, + "ĠHogan": 25316, + "Ġperimeter": 25317, + "ĠSisters": 25318, + "Ġspontaneous": 25319, + "Ġdownside": 25320, + "Ġdescendants": 25321, + "Ġorn": 25322, + "worm": 25323, + "Japanese": 25324, + "Ġ1955": 25325, + "Ġ151": 25326, + "ĠDoing": 25327, + "elsen": 25328, + "umbles": 25329, + "Ġradically": 25330, + "ĠDrum": 25331, + "ĠBach": 25332, + "Ġliabilities": 25333, + "ĠOB": 25334, + "ĠElementary": 25335, + "Ġmeme": 25336, + "ynes": 25337, + "Ġfingerprint": 25338, + "ĠGrab": 25339, + "Ġundertake": 25340, + "Members": 25341, + "ĠReader": 25342, + "ĠSims": 25343, + "god": 25344, + "Ġhypothetical": 25345, + "scient": 25346, + "ĠAJ": 25347, + "Ġcharism": 25348, + "Ġadmissions": 25349, + "ĠMissile": 25350, + "trade": 25351, + "Ġexercising": 25352, + "ĠBackground": 25353, + "Written": 25354, + "Ġvocals": 25355, + "whether": 25356, + "Ġvi": 25357, + "ĠWinner": 25358, + "Ġlitter": 25359, + "ĠShooting": 25360, + "STEM": 25361, + "ãĤ¡": 25362, + "ĠAFL": 25363, + "Ġvariability": 25364, + "Ġeats": 25365, + "ĠDPS": 25366, + "brow": 25367, + "Ġelephants": 25368, + "Ġstrat": 25369, + "ĠÅ": 25370, + "Ġsettlers": 25371, + "Matthew": 25372, + "Ġinadvert": 25373, + "HI": 25374, + "ĠIMF": 25375, + "ĠGoal": 25376, + "Ġnerves": 25377, + "Johnson": 25378, + "eye": 25379, + "ablishment": 25380, + "Thursday": 25381, + "BILITY": 25382, + "Had": 25383, + "amoto": 25384, + "hetamine": 25385, + "eps": 25386, + "Ġmitochond": 25387, + "Ġcompressed": 25388, + "ĠTrevor": 25389, + "ĠAnimals": 25390, + "Tool": 25391, + "Lock": 25392, + "Ġtweak": 25393, + "Ġpinch": 25394, + "Ġcancellation": 25395, + "Pot": 25396, + "Ġfocal": 25397, + "ĠAstron": 25398, + "173": 25399, + "ĠASC": 25400, + 
"ĠOTHER": 25401, + "umni": 25402, + "Ġdemise": 25403, + "dl": 25404, + "Ùħ": 25405, + "Semitism": 25406, + "Ġcracking": 25407, + "Ġcollaborative": 25408, + "Ġexplores": 25409, + "sql": 25410, + "Ġherbs": 25411, + "Ġconfigurations": 25412, + "mis": 25413, + "ĠResult": 25414, + "acey": 25415, + "ĠSmoke": 25416, + "Ġsanct": 25417, + "elia": 25418, + "Ġdegener": 25419, + "Ġdeepest": 25420, + "Ġscreamed": 25421, + "Ġnap": 25422, + "Software": 25423, + "ĠSTAR": 25424, + "EF": 25425, + "ĠXin": 25426, + "sponsored": 25427, + "manship": 25428, + "233": 25429, + "Ġprimaries": 25430, + "Ġfiltering": 25431, + "Ġassemble": 25432, + "mil": 25433, + "ĠMyers": 25434, + "bows": 25435, + "Ġpunched": 25436, + "Mic": 25437, + "Ġinnovations": 25438, + "Ġfunc": 25439, + "ando": 25440, + "Ġfracking": 25441, + "ĠVul": 25442, + "оÐ": 25443, + "oshop": 25444, + "ĠImmun": 25445, + "Ġsettling": 25446, + "Ġadolescents": 25447, + "Ġrebuilding": 25448, + "Ġtransforming": 25449, + "Ġparole": 25450, + "Ġharbor": 25451, + "Ġbooking": 25452, + "otional": 25453, + "ongevity": 25454, + "ĠYo": 25455, + "bug": 25456, + "Ġemerges": 25457, + "ĠMethods": 25458, + "ĠChu": 25459, + "Pres": 25460, + "ĠDungeons": 25461, + "Ġtrailing": 25462, + "ĠRum": 25463, + "ĠHugh": 25464, + "天": 25465, + "ĠEra": 25466, + "ĠBattles": 25467, + "Results": 25468, + "ĠTrading": 25469, + "Ġversa": 25470, + "css": 25471, + "axies": 25472, + "heet": 25473, + "Ġgreed": 25474, + "1989": 25475, + "Ġgardens": 25476, + "Ġcontingent": 25477, + "Park": 25478, + "ĠLeafs": 25479, + "hook": 25480, + "robe": 25481, + "Ġdiplomacy": 25482, + "ĠFuel": 25483, + "ĠInvasion": 25484, + "Ġupgrading": 25485, + "Male": 25486, + "Ġelic": 25487, + "Ġrelentless": 25488, + "ĠCovenant": 25489, + "apesh": 25490, + "ĠTrop": 25491, + "Ty": 25492, + "production": 25493, + "arty": 25494, + "Ġpunches": 25495, + "ako": 25496, + "cyclopedia": 25497, + "ĠRabbit": 25498, + "ĠHDMI": 25499, + "Ġ141": 25500, + "Ġfoil": 25501, + "ItemImage": 25502, + "ĠFG": 25503, + "Ġimplementations": 25504, + "ĠPom": 25505, + "ixtures": 25506, + "Ġawait": 25507, + "Ġ330": 25508, + "amus": 25509, + "Ġumbrella": 25510, + "Ġforesee": 25511, + "separ": 25512, + "Ġcircumcision": 25513, + "Ġperipheral": 25514, + "Say": 25515, + "ĠExpert": 25516, + "Inc": 25517, + "Ġwithdrew": 25518, + "ĠAnders": 25519, + "fried": 25520, + "Ġradioactive": 25521, + "ĠOpening": 25522, + "Ġboarding": 25523, + "ĠND": 25524, + "Ġoverthrow": 25525, + "Activ": 25526, + "WP": 25527, + "ĠActs": 25528, + "×Ļ": 25529, + "Ġmotions": 25530, + "vic": 25531, + "ĠMighty": 25532, + "ĠDefender": 25533, + "aer": 25534, + "Ġthankful": 25535, + "ĠKilling": 25536, + "ĠBris": 25537, + "moil": 25538, + "Ġpredicting": 25539, + "266": 25540, + "choice": 25541, + "Ġkillers": 25542, + "Ġincub": 25543, + "ĠChest": 25544, + "athering": 25545, + "Ġproclaimed": 25546, + "flower": 25547, + "ossom": 25548, + "umbledore": 25549, + "ĠCycling": 25550, + "ĠOccupy": 25551, + "AGES": 25552, + "Pen": 25553, + "ĠYug": 25554, + "Ġpackaged": 25555, + "Ġheightened": 25556, + "cot": 25557, + "stack": 25558, + "Cond": 25559, + "Ġstamps": 25560, + "mage": 25561, + "Ġpersuaded": 25562, + "Ġensl": 25563, + "ĠCardinal": 25564, + "Ġsolitary": 25565, + "Ġpossessing": 25566, + "ĠCork": 25567, + "Ġevid": 25568, + "ĠTay": 25569, + "Ġblues": 25570, + "Ġextremism": 25571, + "Ġlunar": 25572, + "Ġclown": 25573, + "Techn": 25574, + "Ġfestivals": 25575, + "ĠPvP": 25576, + "ĠLar": 25577, + "Ġconsequently": 25578, + "present": 25579, + "Ġsomeday": 25580, + "çİĭ": 25581, + "ĠMeteor": 25582, + 
"Ġtouring": 25583, + "culture": 25584, + "Ġbeaches": 25585, + "Ship": 25586, + "cause": 25587, + "ĠFlood": 25588, + "ãĥ¯": 25589, + "Ġpurity": 25590, + "those": 25591, + "Ġemission": 25592, + "bolt": 25593, + "Ġchord": 25594, + "ĠScripture": 25595, + "Lu": 25596, + "Ġ${": 25597, + "created": 25598, + "Others": 25599, + "258": 25600, + "Ġelemental": 25601, + "Ġannoyed": 25602, + "ĠAE": 25603, + "dan": 25604, + "ĠSag": 25605, + "Researchers": 25606, + "Ġfairy": 25607, + "âĢĵâĢĵ": 25608, + "============": 25609, + "Smart": 25610, + "GGGG": 25611, + "Ġskeletons": 25612, + "Ġpupils": 25613, + "linked": 25614, + "Ġurgency": 25615, + "enabled": 25616, + "ĠFuck": 25617, + "Ġcouncill": 25618, + "rab": 25619, + "UAL": 25620, + "TI": 25621, + "Ġlifes": 25622, + "Ġconfessed": 25623, + "Bug": 25624, + "Ġharmon": 25625, + "ĠCONFIG": 25626, + "ĠNeutral": 25627, + "Double": 25628, + "Ġstaple": 25629, + "ĠSHA": 25630, + "British": 25631, + "ĠSNP": 25632, + "ATOR": 25633, + "oco": 25634, + "Ġswinging": 25635, + "gex": 25636, + "oleon": 25637, + "plain": 25638, + "ĠMissing": 25639, + "ĠTrophy": 25640, + "vari": 25641, + "ranch": 25642, + "Ġ301": 25643, + "440": 25644, + "0000000000000000": 25645, + "Ġrestoring": 25646, + "Ġhaul": 25647, + "ucing": 25648, + "nerg": 25649, + "Ġfutures": 25650, + "Ġstrategist": 25651, + "question": 25652, + "Ġlateral": 25653, + "ĠBard": 25654, + "Ġsor": 25655, + "ĠRhodes": 25656, + "ĠDowntown": 25657, + "?????-": 25658, + "ĠLit": 25659, + "ĠBened": 25660, + "Ġcoil": 25661, + "street": 25662, + "ĠPortal": 25663, + "FILE": 25664, + "ĠGru": 25665, + "*,": 25666, + "231": 25667, + "neum": 25668, + "Ġsucked": 25669, + "Ġrapper": 25670, + "Ġtendencies": 25671, + "ĠLauren": 25672, + "cellaneous": 25673, + "267": 25674, + "Ġbrowse": 25675, + "Ġoverc": 25676, + "header": 25677, + "oise": 25678, + "Ġbeet": 25679, + "ĠGle": 25680, + "Stay": 25681, + "Ġmum": 25682, + "Ġtyped": 25683, + "Ġdiscounts": 25684, + "Talk": 25685, + "ĠOg": 25686, + "existing": 25687, + "ĠSell": 25688, + "uph": 25689, + "CI": 25690, + "ĠAustrian": 25691, + "ĠWarm": 25692, + "Ġdismissal": 25693, + "Ġaverages": 25694, + "camera": 25695, + "Ġallegiance": 25696, + "LAN": 25697, + "=\"#": 25698, + "Ġcommentators": 25699, + "ĠSetting": 25700, + "ĠMidwest": 25701, + "Ġpharmac": 25702, + "ĠEXP": 25703, + "Ġstainless": 25704, + "Chicago": 25705, + "Ġtan": 25706, + "244": 25707, + "Ġcountryside": 25708, + "ĠVac": 25709, + "295": 25710, + "Ġpinned": 25711, + "Ġcrises": 25712, + "Ġstandardized": 25713, + "Task": 25714, + "ĠJail": 25715, + "ĠDocker": 25716, + "colored": 25717, + "forth": 25718, + "\"},": 25719, + "Ġpatrons": 25720, + "Ġspice": 25721, + "Ġmourn": 25722, + "ĠMood": 25723, + "Ġlaundry": 25724, + "Ġequip": 25725, + "ĠMole": 25726, + "yll": 25727, + "ĠTHC": 25728, + "nation": 25729, + "ĠSherlock": 25730, + "Ġissu": 25731, + "ĠKre": 25732, + "ĠAmericas": 25733, + "ĠAAA": 25734, + "Ġsystematically": 25735, + "Ġcontra": 25736, + "ĠSally": 25737, + "Ġrationale": 25738, + "Ġcarriage": 25739, + "Ġpeaks": 25740, + "Ġcontradiction": 25741, + "ensation": 25742, + "ĠFailure": 25743, + "Ġprops": 25744, + "Ġnamespace": 25745, + "Ġcove": 25746, + "fields": 25747, + "ãĤĭ": 25748, + "Ġwool": 25749, + "ĠCatch": 25750, + "Ġpresumed": 25751, + "ĠDiana": 25752, + "ragon": 25753, + "igi": 25754, + "Ġhamm": 25755, + "Ġstunt": 25756, + "ĠGUI": 25757, + "ĠObservatory": 25758, + "ĠShore": 25759, + "Ġsmells": 25760, + "annah": 25761, + "Ġcockpit": 25762, + "ĠDuterte": 25763, + "850": 25764, + "Ġoppressed": 25765, + "breaker": 25766, + 
"ĠContribut": 25767, + "ĠPeru": 25768, + "ĠMonsanto": 25769, + "ĠAttempt": 25770, + "Ġcommanding": 25771, + "Ġfridge": 25772, + "ĠRin": 25773, + "ĠChess": 25774, + "uality": 25775, + "Ġol": 25776, + "Republican": 25777, + "ĠGlory": 25778, + "ĠWIN": 25779, + ".......": 25780, + "agent": 25781, + "reading": 25782, + "Ġinh": 25783, + "Jones": 25784, + "Ġclicks": 25785, + "alan": 25786, + "Ġ[];": 25787, + "ĠMajesty": 25788, + "ĠCed": 25789, + "opus": 25790, + "atel": 25791, + "ê": 25792, + "ARC": 25793, + "ĠEcuador": 25794, + "ãĥł": 25795, + "ĠKuro": 25796, + "Ġrituals": 25797, + "Ġcaptive": 25798, + "Ġounce": 25799, + "Ġdisagreement": 25800, + "Ġslog": 25801, + "fuel": 25802, + "Pet": 25803, + "Mail": 25804, + "Ġexercised": 25805, + "Ġsolic": 25806, + "Ġrainfall": 25807, + "Ġdevotion": 25808, + "ĠAssessment": 25809, + "Ġrobotic": 25810, + "options": 25811, + "ĠRP": 25812, + "ĠFamilies": 25813, + "ĠFlames": 25814, + "Ġassignments": 25815, + "007": 25816, + "akedown": 25817, + "Ġvocabulary": 25818, + "Reilly": 25819, + "Ġcaval": 25820, + "gars": 25821, + "Ġsuppressed": 25822, + "ĠSET": 25823, + "ĠJohns": 25824, + "Ġwarp": 25825, + "broken": 25826, + "Ġstatues": 25827, + "Ġadvocated": 25828, + "Ġ275": 25829, + "Ġperil": 25830, + "omorph": 25831, + "ĠFemin": 25832, + "perfect": 25833, + "Ġhatch": 25834, + "Lib": 25835, + "512": 25836, + "Ġlifelong": 25837, + "313": 25838, + "Ġcheeks": 25839, + "Ġnumbered": 25840, + "ĠMug": 25841, + "Body": 25842, + "ravel": 25843, + "Weight": 25844, + "ĠJak": 25845, + "ĠHeath": 25846, + "Ġkissing": 25847, + "ĠJUST": 25848, + "Ġwaving": 25849, + "upload": 25850, + "Ġinsider": 25851, + "ĠProgressive": 25852, + "ĠFilter": 25853, + "tta": 25854, + "ĠBeam": 25855, + "Ġviolently": 25856, + "ipation": 25857, + "Ġskepticism": 25858, + "Ġ1918": 25859, + "ĠAnnie": 25860, + "ĠSI": 25861, + "Ġgenetics": 25862, + "Ġonboard": 25863, + "atl": 25864, + "ĠFriedman": 25865, + "ĠBri": 25866, + "ceptive": 25867, + "Ġpirate": 25868, + "ĠReporter": 25869, + "278": 25870, + "Ġmythology": 25871, + "Ġeclipse": 25872, + "Ġskins": 25873, + "Ġglyph": 25874, + "ingham": 25875, + "Files": 25876, + "Cour": 25877, + "women": 25878, + "Ġregimes": 25879, + "Ġphotographed": 25880, + "Kat": 25881, + "ĠMAX": 25882, + "Officials": 25883, + "Ġunexpectedly": 25884, + "Ġimpressions": 25885, + "Front": 25886, + ";;;;;;;;": 25887, + "Ġsupremacy": 25888, + "Ġsang": 25889, + "Ġaggravated": 25890, + "Ġabruptly": 25891, + "ĠSector": 25892, + "Ġexcuses": 25893, + "Ġcosting": 25894, + "idepress": 25895, + "Stack": 25896, + "ĠRNA": 25897, + "obil": 25898, + "Ġghosts": 25899, + "ldon": 25900, + "atibility": 25901, + "Topics": 25902, + "Ġreimburse": 25903, + "ĠHM": 25904, + "ĠDeg": 25905, + "Ġthief": 25906, + "yet": 25907, + "ogenesis": 25908, + "leaning": 25909, + "ĠKol": 25910, + "ĠBasketball": 25911, + "Ġfi": 25912, + "ĠSeeing": 25913, + "Ġrecycling": 25914, + "Ġ[-": 25915, + "Congress": 25916, + "Ġlectures": 25917, + "Psy": 25918, + "Ġnep": 25919, + "Ġmaid": 25920, + "Ġoriented": 25921, + "AX": 25922, + "Ġrespectful": 25923, + "rene": 25924, + "flush": 25925, + "ĠUnloaded": 25926, + "request": 25927, + "grid": 25928, + "ĠAlternatively": 25929, + "ĠHugo": 25930, + "Ġdecree": 25931, + "ĠBuddhism": 25932, + "andum": 25933, + "Android": 25934, + "ĠCongo": 25935, + "ĠJoyce": 25936, + "Ġacknowledging": 25937, + "hesive": 25938, + "ĠTomorrow": 25939, + "ĠHiro": 25940, + "thren": 25941, + "ĠMaced": 25942, + "Ġhoax": 25943, + "ĠIncreased": 25944, + "ĠPradesh": 25945, + "Wild": 25946, + "______": 25947, + "161": 25948, 
+ "Ġaunt": 25949, + "Ġdistributing": 25950, + "ĠTucker": 25951, + "ĠSSL": 25952, + "ĠWolves": 25953, + "Building": 25954, + "oult": 25955, + "ĠLuo": 25956, + "ĠYas": 25957, + "ĠSpir": 25958, + "ĠShape": 25959, + "ĠCambod": 25960, + "ĠIPv": 25961, + "Ġml": 25962, + "Ġextrad": 25963, + "390": 25964, + "ĠPenny": 25965, + "dream": 25966, + "Ġstationed": 25967, + "optional": 25968, + "eworthy": 25969, + ".": 26700, + "ĠWorkshop": 26701, + "ĠRetail": 26702, + "ĠAvatar": 26703, + "625": 26704, + "Na": 26705, + "ĠVC": 26706, + "ĠSecure": 26707, + "MY": 26708, + "1988": 26709, + "ossip": 26710, + "Ġprostate": 26711, + "Ġunden": 26712, + "Ġgamer": 26713, + "ĠContents": 26714, + "ĠWarhammer": 26715, + "ĠSentinel": 26716, + "310": 26717, + "Ġsegregation": 26718, + "ĠFlex": 26719, + "ĠMAY": 26720, + "Ġdrills": 26721, + "ĠDrugs": 26722, + "Islamic": 26723, + "Ġspur": 26724, + "Ġcafe": 26725, + "Ġimaginary": 26726, + "Ġguiding": 26727, + "Ġswings": 26728, + "ĠTheme": 26729, + "oby": 26730, + "Ġnud": 26731, + "Ġbegging": 26732, + "Ġstrongh": 26733, + "Ġrejecting": 26734, + "Ġpedestrians": 26735, + "ĠProspect": 26736, + "Rare": 26737, + "sle": 26738, + "Ġconcessions": 26739, + "ĠConstitutional": 26740, + "Ġbeams": 26741, + "Ġfibers": 26742, + "poon": 26743, + "Ġinstincts": 26744, + "property": 26745, + "ĠBIG": 26746, + "Sanders": 26747, + "imates": 26748, + "Ġcoating": 26749, + "Ġcorpses": 26750, + "ĠTRUE": 26751, + "checked": 26752, + "Ġ166": 26753, + "Ash": 26754, + "ĠJS": 26755, + "ĠFiction": 26756, + "Ġcommunal": 26757, + "Ġenergetic": 26758, + "oooooooo": 26759, + "Ġnowadays": 26760, + "ILD": 26761, + "ibo": 26762, + "ĠSUV": 26763, + "Ren": 26764, + "Ġdwelling": 26765, + "Silver": 26766, + "Ġtally": 26767, + "ĠMoving": 26768, + "Ġcoward": 26769, + "Ġgenerals": 26770, + "Ġhorns": 26771, + "Ġcirculated": 26772, + "Ġrobbed": 26773, + "ĠUnlimited": 26774, + "Ġharassed": 26775, + "Ġinhibit": 26776, + "Ġcomposer": 26777, + "ĠSpotify": 26778, + "Ġspreads": 26779, + "364": 26780, + "Ġsuicidal": 26781, + "Ġnoises": 26782, + "ĠStur": 26783, + "Ġsaga": 26784, + "ĠKag": 26785, + "iso": 26786, + "Ġtheoretically": 26787, + "Money": 26788, + "Ġsimilarity": 26789, + "Ġsliced": 26790, + "utils": 26791, + "inges": 26792, + "\"-": 26793, + "Ġanth": 26794, + "Ġimped": 26795, + "Module": 26796, + "Throughout": 26797, + "Ġmenus": 26798, + "committee": 26799, + "andi": 26800, + "obj": 26801, + "inav": 26802, + "fired": 26803, + "ĠAbdullah": 26804, + "Ġundead": 26805, + "Ġfonts": 26806, + "Hold": 26807, + "ENG": 26808, + "Ġsustainability": 26809, + "Ġflick": 26810, + "Ġrazor": 26811, + "ĠFest": 26812, + "ĠCharacters": 26813, + "Ġwording": 26814, + "Ġpopulist": 26815, + "Ġcriticizing": 26816, + "Ġmuse": 26817, + "vine": 26818, + "Ġcardboard": 26819, + "Ġkindly": 26820, + "Ġfringe": 26821, + "ĠTheft": 26822, + "icultural": 26823, + "Ġgovernors": 26824, + "Ġ����": 26825, + "Ġ163": 26826, + "Ġtimeout": 26827, + "ĠAuth": 26828, + "Children": 26829, + "AU": 26830, + "Ġredemption": 26831, + "ĠAlger": 26832, + "Ġ1914": 26833, + "Ġwaved": 26834, + "Ġastronauts": 26835, + "ograms": 26836, + "Ġswamp": 26837, + "ĠFinnish": 26838, + "Ġcandle": 26839, + "Ġtonnes": 26840, + "utm": 26841, + "Ġray": 26842, + "Ġspun": 26843, + "Ġfearful": 26844, + "articles": 26845, + "Ġcaus": 26846, + "orically": 26847, + "ĠRequires": 26848, + "ĠGol": 26849, + "Ġpope": 26850, + "Ġinaugural": 26851, + "Ġgle": 26852, + "ADA": 26853, + "ĠISIL": 26854, + "ĠOffensive": 26855, + "Ġwatchdog": 26856, + "Ġbalcon": 26857, + "entity": 26858, + "ĠHoo": 26859, + 
"Ġgallon": 26860, + "ACC": 26861, + "Ġdoubling": 26862, + "Ġimplication": 26863, + "ĠSight": 26864, + "Ġdoctr": 26865, + "-------": 26866, + "Ġ\\\\": 26867, + "Ġmalt": 26868, + "Roll": 26869, + "Ġâī¥": 26870, + "Ġrecap": 26871, + "adding": 26872, + "uces": 26873, + "ĠBend": 26874, + "figure": 26875, + "Ġturkey": 26876, + "Ġsocietal": 26877, + "ĠTickets": 26878, + "Ġcommercially": 26879, + "Ġspicy": 26880, + "Ġ216": 26881, + "ĠRamp": 26882, + "Ġsuperiority": 26883, + "ï": 26884, + "ĠTracker": 26885, + "Carl": 26886, + "ĠCoy": 26887, + "ĠPatriot": 26888, + "Ġconsulted": 26889, + "Ġlistings": 26890, + "Ġslew": 26891, + "reenshot": 26892, + "ĠGone": 26893, + "Ġ[...]": 26894, + "309": 26895, + "Ġhottest": 26896, + "ر": 26897, + "Ġrocky": 26898, + "ĠDiaz": 26899, + "Ġmassage": 26900, + "Ġparaly": 26901, + "Ġpony": 26902, + "Az": 26903, + "Ġcartridge": 26904, + "ĠNZ": 26905, + "Ġsnack": 26906, + "ĠLamar": 26907, + "plement": 26908, + "ĠLeslie": 26909, + "Ġmater": 26910, + "Ġsnipp": 26911, + "246": 26912, + "Ġjointly": 26913, + "ĠBrisbane": 26914, + "ĠiPod": 26915, + "Ġpumping": 26916, + "Ġgoat": 26917, + "ĠSharon": 26918, + "ealing": 26919, + "Ġcoron": 26920, + "Ġanomal": 26921, + "rahim": 26922, + "ĠConnection": 26923, + "Ġsculpture": 26924, + "Ġscheduling": 26925, + "ĠDaddy": 26926, + "athing": 26927, + "Ġeyebrows": 26928, + "Ġcurved": 26929, + "Ġsentiments": 26930, + "Ġdrafting": 26931, + "Drop": 26932, + "([": 26933, + "Ġnominal": 26934, + "ĠLeadership": 26935, + "ĠGrow": 26936, + "Ġ176": 26937, + "Ġconstructive": 26938, + "ivation": 26939, + "Ġcorrupted": 26940, + "gerald": 26941, + "ĠCros": 26942, + "ĠChester": 26943, + "ĠLap": 26944, + "ãģª": 26945, + "OTH": 26946, + "DATA": 26947, + "Ġalmond": 26948, + "probably": 26949, + "Imp": 26950, + "Ġfeast": 26951, + "ĠWarcraft": 26952, + "Flor": 26953, + "Ġcheckpoint": 26954, + "Ġtranscription": 26955, + "Ġ204": 26956, + "Ġtweaks": 26957, + "Ġrelieve": 26958, + "Science": 26959, + "Ġperformer": 26960, + "Zone": 26961, + "Ġturmoil": 26962, + "igated": 26963, + "hibit": 26964, + "ĠCafe": 26965, + "themed": 26966, + "Ġfluor": 26967, + "bench": 26968, + "Ġdecom": 26969, + "ĠUnt": 26970, + "ĠBarrett": 26971, + "ĠFacts": 26972, + "Ġtasting": 26973, + "ĠPTSD": 26974, + "ĠSeal": 26975, + "ĠJudaism": 26976, + "ĠDynamic": 26977, + "ĠCors": 26978, + "Ve": 26979, + "ĠMing": 26980, + "ĠTransform": 26981, + "von": 26982, + "ĠDefenders": 26983, + "ĠTactical": 26984, + "ĠVon": 26985, + "ĠUnivers": 26986, + "Ġdistorted": 26987, + "ĠBreath": 26988, + "?'\"": 26989, + "Ġagon": 26990, + "ĠDeadly": 26991, + "Ġlan": 26992, + "ĠCycle": 26993, + "orned": 26994, + "Ġreliably": 26995, + "Ġglor": 26996, + "ĠMonkey": 26997, + "ãĥ¡": 26998, + "Ġadren": 26999, + "Ġmicrowave": 27000, + "ĠAlban": 27001, + "ircraft": 27002, + "digit": 27003, + "smart": 27004, + "ĠDread": 27005, + "¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯": 27006, + "{{": 27007, + "ĠRochester": 27008, + "Ġsimplified": 27009, + "Ġinflicted": 27010, + "Ġtakeover": 27011, + "Ġyourselves": 27012, + "aditional": 27013, + "Ġmuscular": 27014, + "KS": 27015, + "Ġingen": 27016, + "Tax": 27017, + "ĠFeature": 27018, + "277": 27019, + "Ġcruc": 27020, + "Ġcrate": 27021, + "Ġunidentified": 27022, + "Ġacclaimed": 27023, + "ĠManga": 27024, + "ĠFrances": 27025, + "ĠNepal": 27026, + "ĠGerald": 27027, + "ĠKuwait": 27028, + "Ġslain": 27029, + "ĠHeb": 27030, + "ĠGoku": 27031, + "ãģ®æ": 27032, + "286": 27033, + "Mrs": 27034, + "ĠCody": 27035, + "ĠSanctuary": 27036, + "016": 27037, + "Ġdismant": 27038, + "Ġdataset": 27039, + "ĠHond": 27040, + "buck": 27041, + 
"ĠPatterson": 27042, + "Ġpalette": 27043, + "ĠGD": 27044, + "icol": 27045, + "ĠLodge": 27046, + "Ġplanetary": 27047, + "akin": 27048, + "ĠRegistered": 27049, + "abwe": 27050, + "ĠPetersburg": 27051, + "Ġhailed": 27052, + "ĠPiece": 27053, + "Sche": 27054, + "ĠDOJ": 27055, + "Ġenumer": 27056, + "181": 27057, + "ĠObserver": 27058, + "ĠBold": 27059, + "founded": 27060, + "commerce": 27061, + "Ġexploits": 27062, + "ĠFinding": 27063, + "URN": 27064, + "ĠSne": 27065, + "ĠAcid": 27066, + "ayette": 27067, + "ĠValues": 27068, + "Ġdrastic": 27069, + "Ġarchitectural": 27070, + "Ġ\".": 27071, + "×ķ": 27072, + "umped": 27073, + "Ġwrapping": 27074, + "Ġwidow": 27075, + "ĠSlayer": 27076, + "lace": 27077, + "once": 27078, + "Germany": 27079, + "avoid": 27080, + "Ġtemples": 27081, + "PAR": 27082, + "ô": 27083, + "ĠLucifer": 27084, + "ĠFlickr": 27085, + "lov": 27086, + "forces": 27087, + "Ġscouting": 27088, + "Ġlouder": 27089, + "tesy": 27090, + "Ġbeforehand": 27091, + "Äĵ": 27092, + "ĠNeon": 27093, + "ĠWol": 27094, + "ĠTypically": 27095, + "ĠPolitico": 27096, + "-+-+": 27097, + "Ġbuilder": 27098, + "Ġderive": 27099, + "Kill": 27100, + "Ġpoker": 27101, + "Ġambiguous": 27102, + "Ġlifts": 27103, + "Ġcyt": 27104, + "Ġribs": 27105, + "oodle": 27106, + "ĠSounds": 27107, + "hair": 27108, + "ĠSyndrome": 27109, + "tf": 27110, + "Ġproportional": 27111, + "uid": 27112, + "Ġpertaining": 27113, + "ĠKindle": 27114, + "ĠNegro": 27115, + "Ġreiterated": 27116, + "ĠTonight": 27117, + "oths": 27118, + "ĠCornell": 27119, + "Ġowing": 27120, + "Ġ208": 27121, + "elfare": 27122, + "ocating": 27123, + "ĠBirds": 27124, + "Subscribe": 27125, + "Ġessays": 27126, + "Ġburdens": 27127, + "Ġillustrations": 27128, + "arious": 27129, + "ERAL": 27130, + "ĠCalcul": 27131, + "Ġxen": 27132, + "ĠLinkedIn": 27133, + "ĠJung": 27134, + "Ġredesign": 27135, + "Connor": 27136, + "296": 27137, + "Ġreversal": 27138, + "ĠAdelaide": 27139, + "ĠLL": 27140, + "Ġsinking": 27141, + "Ġgum": 27142, + "USH": 27143, + "capt": 27144, + "ĠGrimm": 27145, + "Ġfootsteps": 27146, + "ĠCBD": 27147, + "ispers": 27148, + "Ġprose": 27149, + "Wednesday": 27150, + "ĠMovies": 27151, + "edin": 27152, + "Ġoverturned": 27153, + "Ġcontentious": 27154, + "USB": 27155, + "~~~~~~~~~~~~~~~~": 27156, + "ĠCopper": 27157, + "Ġpointless": 27158, + "NV": 27159, + "values": 27160, + "olphin": 27161, + "dain": 27162, + "Ġdeposited": 27163, + "ĠGW": 27164, + "Ġpreceded": 27165, + "ĠCla": 27166, + "ĠGolem": 27167, + "ĠNim": 27168, + "Ġβ": 27169, + "ĠEngineers": 27170, + "middle": 27171, + "Ġflatt": 27172, + "operative": 27173, + "Ġcouncils": 27174, + "imbabwe": 27175, + "elin": 27176, + "Ġstressful": 27177, + "ĠLD": 27178, + "Ġresh": 27179, + "lake": 27180, + "Ġwheelchair": 27181, + "ĠAlternative": 27182, + "Ġoptimize": 27183, + "operation": 27184, + "Ġpeek": 27185, + "Ġoneself": 27186, + "igil": 27187, + "Ġtransitions": 27188, + "opathy": 27189, + "blank": 27190, + "Ġ169": 27191, + "171": 27192, + "________________________________________________________________": 27193, + "Ġlaundering": 27194, + "Enc": 27195, + "ĠDEC": 27196, + "Ġworkouts": 27197, + "Ġspikes": 27198, + "Ġdinosaurs": 27199, + "Ġdiscriminatory": 27200, + "Pool": 27201, + "Rather": 27202, + "385": 27203, + "RNA": 27204, + "testers": 27205, + "eto": 27206, + "ĠIdentity": 27207, + "Ġvein": 27208, + "ĠBurton": 27209, + "Ġarcade": 27210, + "420": 27211, + "Ultimately": 27212, + "ĠSadly": 27213, + "ð": 27214, + "pill": 27215, + "Ġcubic": 27216, + "ĠSpectrum": 27217, + "these": 27218, + "states": 27219, + "Ġunofficial": 27220, + 
"hawks": 27221, + "ĠEVERY": 27222, + "Ġrainbow": 27223, + "Ġincarceration": 27224, + "anding": 27225, + "Ġsyll": 27226, + "ĠEverton": 27227, + "Ġ179": 27228, + "ĠSerbia": 27229, + "Ġ189": 27230, + "meter": 27231, + "ĠMickey": 27232, + "Ġantiqu": 27233, + "Ġfactual": 27234, + "neck": 27235, + "ĠNare": 27236, + "norm": 27237, + "must": 27238, + "Ġhighways": 27239, + "Ġglam": 27240, + "Ġdividing": 27241, + "ĠSquadron": 27242, + "ĠMartha": 27243, + "Ġbirths": 27244, + "Cover": 27245, + "////////////////": 27246, + "ĠWong": 27247, + "Phot": 27248, + "ĠALS": 27249, + "rio": 27250, + "ĠNonetheless": 27251, + "ĠLemon": 27252, + "Ġ206": 27253, + "ĠEE": 27254, + "Ġderivative": 27255, + "ĠWWII": 27256, + "vote": 27257, + "Ġtherein": 27258, + "Ġseparating": 27259, + "446": 27260, + "sync": 27261, + "ĠStreets": 27262, + "Ġratt": 27263, + "Ġmunicipality": 27264, + "ĠShortly": 27265, + "Ġmonk": 27266, + "),\"": 27267, + "Ġscrub": 27268, + "Ġoperatives": 27269, + "Neither": 27270, + "Place": 27271, + "ĠLimit": 27272, + "Female": 27273, + "ĠActor": 27274, + "Character": 27275, + "Ġconstituted": 27276, + "357": 27277, + "Ġprotested": 27278, + "ĠStraw": 27279, + "ĠHeight": 27280, + "ilda": 27281, + "ĠTyph": 27282, + "Ġfloods": 27283, + "Ġcosmetic": 27284, + "WAY": 27285, + "perture": 27286, + "upon": 27287, + "tons": 27288, + "essing": 27289, + "ĠPocket": 27290, + "Ġrooft": 27291, + "ĠCaucas": 27292, + "Ġantidepress": 27293, + "Ġincompatible": 27294, + "ECD": 27295, + "Ġopera": 27296, + "ĠContest": 27297, + "Ġgenerators": 27298, + "lime": 27299, + "Defense": 27300, + "1987": 27301, + "forum": 27302, + "Ġsavage": 27303, + "ĠHungarian": 27304, + "nz": 27305, + "Ġmetallic": 27306, + "Ġexpelled": 27307, + "Ġresidency": 27308, + "Ġdresses": 27309, + "666": 27310, + "ĠClement": 27311, + "fires": 27312, + "Category": 27313, + "Ġgeek": 27314, + "alis": 27315, + "Ġcemetery": 27316, + "educated": 27317, + "Ġcrawl": 27318, + "ĠUnable": 27319, + "ĠTyson": 27320, + "akis": 27321, + "Ġpardon": 27322, + "ĠWra": 27323, + "Ġstrengthened": 27324, + "ĠFors": 27325, + "335": 27326, + "ĠHC": 27327, + "ĠMond": 27328, + "Ġvisuals": 27329, + "ĠBeatles": 27330, + "ettlement": 27331, + "Ġï": 27332, + "gro": 27333, + "Ġbash": 27334, + "Ġpoorest": 27335, + "Ġexcel": 27336, + "Ġaspirations": 27337, + "ĠMunicip": 27338, + "ensible": 27339, + "Ġceremonies": 27340, + "Ġintimidation": 27341, + "ĠCONTR": 27342, + "beck": 27343, + "ĠKap": 27344, + "asu": 27345, + "Ġtrademarks": 27346, + "ĠSew": 27347, + "ĠCompetition": 27348, + "network": 27349, + "ĠArri": 27350, + "ĠTet": 27351, + "Roaming": 27352, + "WC": 27353, + "Dat": 27354, + "Ġsob": 27355, + "Ġpairing": 27356, + "Ġoverdose": 27357, + "SAY": 27358, + "aber": 27359, + "Ġrevolt": 27360, + "ĠFah": 27361, + "acting": 27362, + "eq": 27363, + "estation": 27364, + "Fight": 27365, + "ĠMarks": 27366, + "273": 27367, + "Ġ178": 27368, + "Raw": 27369, + "ãģĭ": 27370, + "349": 27371, + "blocks": 27372, + "Ġverge": 27373, + "estine": 27374, + "ĠPodesta": 27375, + "Ġinvasive": 27376, + "Ġprofoundly": 27377, + "ĠAo": 27378, + "each": 27379, + "Ġlest": 27380, + "interpret": 27381, + "Ġshrinking": 27382, + "Ġerrone": 27383, + "Ġchees": 27384, + "lys": 27385, + "ĠIvy": 27386, + "ĠDirectory": 27387, + "Ġhinted": 27388, + "VICE": 27389, + "Ġcontacting": 27390, + "ĠGent": 27391, + "hei": 27392, + "Ġlabeling": 27393, + "Ġmercury": 27394, + "ĠLite": 27395, + "Ġexpires": 27396, + "Ġdestabil": 27397, + "ritis": 27398, + "cu": 27399, + "Ġfeathers": 27400, + "Ġsteer": 27401, + "Ġprogrammed": 27402, + "ĠVader": 
27403, + "Going": 27404, + "ĠElim": 27405, + "Ġyo": 27406, + "ĠMiche": 27407, + "Ġ203": 27408, + "Ġsleeves": 27409, + "Ġbully": 27410, + "ĠHumans": 27411, + "368": 27412, + "Ġcompress": 27413, + "ĠBanner": 27414, + "ARS": 27415, + "Ġawhile": 27416, + "Ġcalib": 27417, + "Ġsponsorship": 27418, + "ĠDifficulty": 27419, + "ĠPapers": 27420, + "Ġidentifier": 27421, + "}.": 27422, + "Ġyog": 27423, + "ĠShia": 27424, + "Ġcleanup": 27425, + "Ġvibe": 27426, + "introdu": 27427, + "imming": 27428, + "Australia": 27429, + "Ġoutlines": 27430, + "ĠYoutube": 27431, + "train": 27432, + "ĠMakes": 27433, + "Ġdeported": 27434, + "Ġcentr": 27435, + "ĠDug": 27436, + "ĠBoulder": 27437, + "ĠBuffy": 27438, + "Ġinjunction": 27439, + "ĠHarley": 27440, + "ĠGroups": 27441, + "ĠDumbledore": 27442, + "ĠClara": 27443, + "Ġ\"-": 27444, + "Ġsacrificed": 27445, + "eph": 27446, + "Shadow": 27447, + "ibling": 27448, + "Ġfreelance": 27449, + "Ġevidently": 27450, + "phal": 27451, + "Ġretains": 27452, + "Mir": 27453, + "Ġfinite": 27454, + "dar": 27455, + "ĠCous": 27456, + "Ġrepaired": 27457, + "Ġperiodic": 27458, + "Ġchampionships": 27459, + "Ġasteroid": 27460, + "blind": 27461, + "Ġexpressly": 27462, + "ĠAstros": 27463, + "Ġscaled": 27464, + "Ġgeographical": 27465, + "ĠRapids": 27466, + "Enjoy": 27467, + "Ġelastic": 27468, + "ĠMohamed": 27469, + "Market": 27470, + "begin": 27471, + "Ġdiscovers": 27472, + "Ġtelecommunications": 27473, + "Ġscanner": 27474, + "Ġenlarge": 27475, + "Ġsharks": 27476, + "Ġpsychedel": 27477, + "ĠRouge": 27478, + "Ġsnapshot": 27479, + "isine": 27480, + "XP": 27481, + "Ġpesticides": 27482, + "ĠLSD": 27483, + "ĠDistribution": 27484, + "really": 27485, + "Ġdegradation": 27486, + "Ġdisguise": 27487, + "Ġbiom": 27488, + "ĠEXT": 27489, + "Ġequations": 27490, + "Ġhazards": 27491, + "ĠCompared": 27492, + ")*": 27493, + "Ġvirtues": 27494, + "Ġelders": 27495, + "Ġenhancing": 27496, + "ĠAcross": 27497, + "eros": 27498, + "angling": 27499, + "Ġcombust": 27500, + "ucci": 27501, + "Ġconcussion": 27502, + "Ġcontraception": 27503, + "ĠKang": 27504, + "Ġexpresses": 27505, + "Ġaux": 27506, + "ĠPione": 27507, + "Ġexhibits": 27508, + "Debug": 27509, + "OTAL": 27510, + "ĠAlready": 27511, + "ĠWheeler": 27512, + "Ġexpands": 27513, + "?:": 27514, + "Ġreconciliation": 27515, + "Ġpirates": 27516, + "Ġpurse": 27517, + "Ġdiscourage": 27518, + "Ġspectacle": 27519, + "Rank": 27520, + "Ġwraps": 27521, + "ĠThought": 27522, + "Ġimpending": 27523, + "Opp": 27524, + "ĠAnglo": 27525, + "ĠEUR": 27526, + "Ġscrewed": 27527, + "retched": 27528, + "Ġencouragement": 27529, + "models": 27530, + "Ġconfuse": 27531, + "mmm": 27532, + "ĠVitamin": 27533, + "âĸijâĸij": 27534, + "Cru": 27535, + "Ġknights": 27536, + "Ġdiscard": 27537, + "Ġbishops": 27538, + "ĠWear": 27539, + "ĠGarrett": 27540, + "kan": 27541, + "ãĥŁ": 27542, + "Ġmasculine": 27543, + "capital": 27544, + "ĠAus": 27545, + "Ġfatally": 27546, + "thanks": 27547, + "ĠAU": 27548, + "ĠGut": 27549, + "1200": 27550, + "Ġ00000000": 27551, + "Ġsurrog": 27552, + "ĠBIOS": 27553, + "raits": 27554, + "ĠWatts": 27555, + "Ġresurrection": 27556, + "ĠElectoral": 27557, + "ĠTips": 27558, + "4000": 27559, + "Ġnutrient": 27560, + "Ġdepicting": 27561, + "Ġsprink": 27562, + "Ġmuff": 27563, + "ĠLIM": 27564, + "ĠSample": 27565, + "psc": 27566, + "ibi": 27567, + "generated": 27568, + "Ġspecimens": 27569, + "Ġdissatisf": 27570, + "Ġtailored": 27571, + "Ġholdings": 27572, + "ĠMonthly": 27573, + "ĠEat": 27574, + "poons": 27575, + "Ġnec": 27576, + "ĠCage": 27577, + "ĠLotus": 27578, + "ĠLantern": 27579, + "Ġfrontier": 
27580, + "Ġpensions": 27581, + "Ġjoked": 27582, + "ĠHardy": 27583, + "=-=-=-=-": 27584, + "rade": 27585, + "UID": 27586, + "Ġrails": 27587, + "Ġemit": 27588, + "Ġslate": 27589, + "Ġsmug": 27590, + "Ġspit": 27591, + "ĠCalls": 27592, + "ĠJacobs": 27593, + "feat": 27594, + "ĠUE": 27595, + "Ġrestruct": 27596, + "Ġregeneration": 27597, + "Ġenergies": 27598, + "ĠConnor": 27599, + "OHN": 27600, + "ĠCheese": 27601, + "Ġger": 27602, + "Ġresurrect": 27603, + "management": 27604, + "NW": 27605, + "Ġpresently": 27606, + "ĠBruins": 27607, + "Member": 27608, + "ĠMang": 27609, + "idan": 27610, + "Ġboosting": 27611, + "wyn": 27612, + "+.": 27613, + "requisite": 27614, + "ĠNYPD": 27615, + "ĠMegan": 27616, + "ĠConditions": 27617, + "Ġpics": 27618, + "nesium": 27619, + "ĠRash": 27620, + "Ġ174": 27621, + "ĠDucks": 27622, + "Ġembro": 27623, + "zu": 27624, + "onian": 27625, + "religious": 27626, + "Ġcraz": 27627, + "ĠACA": 27628, + "ĠZucker": 27629, + "EMA": 27630, + "ĠPros": 27631, + "Weapon": 27632, + "ĠKnox": 27633, + "ĠArduino": 27634, + "Ġstove": 27635, + "Ġheavens": 27636, + "ĠPurchase": 27637, + "Ġherd": 27638, + "Ġfundraiser": 27639, + "Digital": 27640, + "5000": 27641, + "Ġproponents": 27642, + "/âĢĭ": 27643, + "Ġjelly": 27644, + "ĠVisa": 27645, + "Ġmonks": 27646, + "Ġadvancement": 27647, + "ĠWer": 27648, + "Ġ187": 27649, + "eus": 27650, + "ertility": 27651, + "Ġfetal": 27652, + "Ġ1936": 27653, + "Lo": 27654, + "Ġoutfits": 27655, + "Ġstaircase": 27656, + "bomb": 27657, + "Ġcustomized": 27658, + "clair": 27659, + "Tree": 27660, + "Ġmapped": 27661, + "ĠConsidering": 27662, + "ĠTorres": 27663, + "Ġmethyl": 27664, + "Ġapproximate": 27665, + "Ġdoom": 27666, + "ĠHansen": 27667, + "Ġcrossover": 27668, + "Ġstandalone": 27669, + "ä¼": 27670, + "Ġinvites": 27671, + "Ġgraveyard": 27672, + "Ġhp": 27673, + "DonaldTrump": 27674, + "Ġescort": 27675, + "Gar": 27676, + "Ġpredecessors": 27677, + "Ġhay": 27678, + "Ġenzyme": 27679, + "ĠStraight": 27680, + "visors": 27681, + "Ing": 27682, + "aneously": 27683, + "ĠApplied": 27684, + "Ġfec": 27685, + "ĠDurant": 27686, + "Ġoutspoken": 27687, + "orb": 27688, + "Ġzeal": 27689, + "Ġdisgrace": 27690, + "').": 27691, + "ĠCheng": 27692, + "289": 27693, + "ĠRena": 27694, + "ĠSuicide": 27695, + "294": 27696, + "Ġoutraged": 27697, + "ĠNewman": 27698, + "ĠNvidia": 27699, + "ĠAber": 27700, + "ĠBers": 27701, + "Ġrecreation": 27702, + "Window": 27703, + "ĠDP": 27704, + "xe": 27705, + "Ġpedoph": 27706, + "Ġfallout": 27707, + "amboo": 27708, + "Ġpresentations": 27709, + "ĠApps": 27710, + "Ġhtml": 27711, + "345": 27712, + "ĠXXX": 27713, + "Ġrubbing": 27714, + "ĠLeather": 27715, + "Ġhumidity": 27716, + "seys": 27717, + "established": 27718, + "ĠUnits": 27719, + "646": 27720, + "Ġrespectable": 27721, + "Auto": 27722, + "Ġthriving": 27723, + "ĠInnovation": 27724, + "angs": 27725, + "Extra": 27726, + "regulation": 27727, + "298": 27728, + "pick": 27729, + "Examples": 27730, + "ĠCJ": 27731, + "Attack": 27732, + "Ġdracon": 27733, + "LT": 27734, + "Ġsticker": 27735, + "rers": 27736, + "Ġsunny": 27737, + "Iss": 27738, + "regulated": 27739, + "dim": 27740, + "ĠAbstract": 27741, + "Ġhusbands": 27742, + "Office": 27743, + "omination": 27744, + "itars": 27745, + "ANGE": 27746, + "ascal": 27747, + "ĠKris": 27748, + "ĠInfantry": 27749, + "Ġmalf": 27750, + "ĠAthe": 27751, + "ĠRally": 27752, + "balanced": 27753, + "........................": 27754, + "OUP": 27755, + "Ġmolecule": 27756, + "metics": 27757, + "ĠSplit": 27758, + "ĠInstructions": 27759, + "ĠNights": 27760, + "cards": 27761, + "Ġtug": 27762, + 
"Ġcone": 27763, + "åŃ": 27764, + "Ġtx": 27765, + "ĠDiscussion": 27766, + "Ġcatastrophe": 27767, + "ppe": 27768, + "gio": 27769, + "Ġcommunism": 27770, + "Ġhalted": 27771, + "ĠGuant": 27772, + "clean": 27773, + "ĠSched": 27774, + "ĠKanye": 27775, + "Ġwander": 27776, + "ĠSeriously": 27777, + "Ġ188": 27778, + "ennial": 27779, + "follow": 27780, + "productive": 27781, + "ĠFlow": 27782, + "ĠSail": 27783, + "Ġcraw": 27784, + "Ġsimulations": 27785, + "oru": 27786, + "angles": 27787, + "ĠNolan": 27788, + "Ġmenstru": 27789, + "470": 27790, + "Ġ207": 27791, + "aja": 27792, + "Ġcasually": 27793, + "boarding": 27794, + "Ġ222": 27795, + "ovy": 27796, + "ĠNumbers": 27797, + "umat": 27798, + "OE": 27799, + "287": 27800, + "ĠClemson": 27801, + "Ġcerts": 27802, + "Ġslid": 27803, + "ĠTribe": 27804, + "Ġtoast": 27805, + "Ġfortunes": 27806, + "Ġfals": 27807, + "ĠCommittees": 27808, + "Ġgp": 27809, + "Ġfiery": 27810, + "ĠNets": 27811, + "ĠAnime": 27812, + "Package": 27813, + "ĠCompare": 27814, + "laughter": 27815, + "infect": 27816, + "Ġatrocities": 27817, + "Ġjustices": 27818, + "Ġinsults": 27819, + "ĠVernon": 27820, + "Ġshaken": 27821, + "Ġpersona": 27822, + "estamp": 27823, + "367": 27824, + "brain": 27825, + "Ġexperimenting": 27826, + "Ken": 27827, + "ĠElectronics": 27828, + "Ġ161": 27829, + "domain": 27830, + "Ġgraphical": 27831, + "bishop": 27832, + "Ġwhopping": 27833, + "ĠEvangel": 27834, + "Ġadvertisers": 27835, + "ĠSpear": 27836, + "Ġbids": 27837, + "Ġdestroys": 27838, + "utz": 27839, + "Ġundersc": 27840, + "ĠADD": 27841, + "Ġants": 27842, + "ĠCum": 27843, + "ipples": 27844, + "ĠFill": 27845, + "Ġglanced": 27846, + "Ġindicted": 27847, + "ĠEff": 27848, + "Ġmiscon": 27849, + "ĠDesktop": 27850, + "Ġabide": 27851, + "ãĥĢ": 27852, + "ĠIo": 27853, + "ĠCoul": 27854, + "Ġcapsule": 27855, + "ĠChrys": 27856, + "MON": 27857, + "Ġundes": 27858, + "ĠIRA": 27859, + "Ġcitation": 27860, + "Ġdictate": 27861, + "ĠNetworks": 27862, + "ĠConflict": 27863, + "ĠStuff": 27864, + "xa": 27865, + "isec": 27866, + "ĠChemistry": 27867, + "Ġquarterly": 27868, + "Williams": 27869, + "anan": 27870, + "Opt": 27871, + "ĠAlexandria": 27872, + "outheastern": 27873, + "ĠSpringfield": 27874, + "ĠBlacks": 27875, + "Ġgeography": 27876, + "242": 27877, + "Ġutmost": 27878, + "ĠExxon": 27879, + "abouts": 27880, + "EVA": 27881, + "ĠEnable": 27882, + "ĠBarr": 27883, + "Ġdisagreed": 27884, + "ĠCyprus": 27885, + "Ġdementia": 27886, + "Ġlabs": 27887, + "Ġubiquitous": 27888, + "ĠLOVE": 27889, + "Ġconsolidated": 27890, + "sr": 27891, + "Ġcreamy": 27892, + "ĠTimber": 27893, + "Regardless": 27894, + "ĠCertificate": 27895, + "Ġ\"...": 27896, + "ogenous": 27897, + "Captain": 27898, + "Ġinsulting": 27899, + "ĠSoros": 27900, + "ĠInstr": 27901, + "ĠBulgaria": 27902, + "better": 27903, + "Ġsucking": 27904, + "ĠDavidson": 27905, + "atz": 27906, + "Ġcollateral": 27907, + "gif": 27908, + "Ġplagued": 27909, + "ĠCancel": 27910, + "ĠGardner": 27911, + "RB": 27912, + "Ġsixteen": 27913, + "Remove": 27914, + "uristic": 27915, + "cook": 27916, + "Rod": 27917, + "Ġcomprising": 27918, + "fle": 27919, + ")âĢĶ": 27920, + "ĠViking": 27921, + "growth": 27922, + "agonal": 27923, + "Ġsrf": 27924, + "afety": 27925, + "mot": 27926, + "Nearly": 27927, + "stown": 27928, + "ĠFactor": 27929, + "Ġautomobile": 27930, + "Ġprocedural": 27931, + "mask": 27932, + "ampires": 27933, + "Ġdisappears": 27934, + "jab": 27935, + "315": 27936, + "Ġ1951": 27937, + "needed": 27938, + "Ġdaring": 27939, + "leader": 27940, + "Ġpodium": 27941, + "Ġunhealthy": 27942, + "Ġmund": 27943, + "Ġpyramid": 
27944, + "ocre": 27945, + "Ġkissed": 27946, + "Ġdreamed": 27947, + "ĠFantastic": 27948, + "ĠGly": 27949, + "åĬ": 27950, + "Ġgreatness": 27951, + "Ġspices": 27952, + "Ġmetropolitan": 27953, + "Ġcompuls": 27954, + "iets": 27955, + "1016": 27956, + "ĠSham": 27957, + "ĠPyr": 27958, + "flies": 27959, + "ĠMidnight": 27960, + "Ġswallowed": 27961, + "Ġgenres": 27962, + "ĠLucky": 27963, + "ĠRewards": 27964, + "Ġdispatch": 27965, + "ĠIPA": 27966, + "ĠApply": 27967, + "Ġaven": 27968, + "alities": 27969, + "312": 27970, + "things": 27971, + "Ġ().": 27972, + "Ġmates": 27973, + "ĠSz": 27974, + "ĠCOP": 27975, + "olate": 27976, + "OFF": 27977, + "Ġrecharge": 27978, + "caps": 27979, + "ĠYorker": 27980, + "icone": 27981, + "Ġgalaxies": 27982, + "ileaks": 27983, + "Dave": 27984, + "ĠPuzz": 27985, + "ĠCeltic": 27986, + "ĠAFC": 27987, + "276": 27988, + "ĠSons": 27989, + "Ġaffirmative": 27990, + "Hor": 27991, + "Ġtutorials": 27992, + "ĠCITY": 27993, + "ĠRosa": 27994, + "ĠExtension": 27995, + "Series": 27996, + "Ġfats": 27997, + "Ġrab": 27998, + "lis": 27999, + "Ġunic": 28000, + "Ġeve": 28001, + "ĠSpin": 28002, + "Ġadulthood": 28003, + "typ": 28004, + "Ġsectarian": 28005, + "Ġcheckout": 28006, + "ĠCycl": 28007, + "Single": 28008, + "Ġmartyr": 28009, + "Ġchilling": 28010, + "888": 28011, + "oufl": 28012, + "Ġ];": 28013, + "Ġcongestion": 28014, + "mk": 28015, + "ĠWhereas": 28016, + "Ġ1938": 28017, + "urrencies": 28018, + "erion": 28019, + "Ġboast": 28020, + "ĠPatients": 28021, + "Ġchap": 28022, + "ĠBD": 28023, + "realDonaldTrump": 28024, + "Ġexamines": 28025, + "hov": 28026, + "Ġstartling": 28027, + "ĠBabylon": 28028, + "wid": 28029, + "omew": 28030, + "brance": 28031, + "ĠOdyssey": 28032, + "wig": 28033, + "Ġtorch": 28034, + "ĠVox": 28035, + "ĠMoz": 28036, + "ĠTroll": 28037, + "ĠAns": 28038, + "Similarly": 28039, + "ĠFul": 28040, + "006": 28041, + "Unless": 28042, + "ĠAlone": 28043, + "stead": 28044, + "ĠPublisher": 28045, + "rights": 28046, + "tu": 28047, + "ĠDoesn": 28048, + "Ġprofessionally": 28049, + "Ġclo": 28050, + "icz": 28051, + "Ġsteals": 28052, + "Ġá": 28053, + "1986": 28054, + "Ġsturdy": 28055, + "ĠJohann": 28056, + "Ġmedals": 28057, + "Ġfilings": 28058, + "ĠFraser": 28059, + "done": 28060, + "Ġmultinational": 28061, + "Ġfeder": 28062, + "Ġworthless": 28063, + "Ġpest": 28064, + "Yesterday": 28065, + "ankind": 28066, + "Ġgays": 28067, + "Ġborne": 28068, + "ĠPOS": 28069, + "Picture": 28070, + "Ġpercentages": 28071, + "251": 28072, + "rame": 28073, + "Ġpotions": 28074, + "AMD": 28075, + "ĠLebanese": 28076, + "Ġrang": 28077, + "ĠLSU": 28078, + "ongs": 28079, + "Ġpeninsula": 28080, + "ĠClause": 28081, + "ALK": 28082, + "oha": 28083, + "ĠMacBook": 28084, + "Ġunanimous": 28085, + "Ġlenders": 28086, + "Ġhangs": 28087, + "Ġfranchises": 28088, + "orers": 28089, + "ĠUpdates": 28090, + "Ġisolate": 28091, + "andro": 28092, + "Soon": 28093, + "Ġdisruptive": 28094, + "ĠSurve": 28095, + "Ġstitches": 28096, + "ĠScorp": 28097, + "ĠDominion": 28098, + "Ġsupplying": 28099, + "Arg": 28100, + "Ġturret": 28101, + "ĠLuk": 28102, + "Ġbrackets": 28103, + "*)": 28104, + "ĠRevolutionary": 28105, + "ĠHonest": 28106, + "Ġnoticing": 28107, + "ĠShannon": 28108, + "Ġafforded": 28109, + "Ġtha": 28110, + "ĠJanet": 28111, + "!--": 28112, + "ĠNarendra": 28113, + "ĠPlot": 28114, + "Hol": 28115, + "sever": 28116, + "eenth": 28117, + "Ġobstruction": 28118, + "Ġ1024": 28119, + "staff": 28120, + "jas": 28121, + "orget": 28122, + "scenes": 28123, + "laughs": 28124, + "ĠFargo": 28125, + "crime": 28126, + "Ġorchestr": 28127, + "Ġdelet": 28128, + 
"iliary": 28129, + "rieved": 28130, + "Ġmilitar": 28131, + "ĠGreene": 28132, + "âĹı": 28133, + "ãģ¦": 28134, + "ĠGuards": 28135, + "Ġunleashed": 28136, + "ĠWeber": 28137, + "Ġadjustable": 28138, + "Ġcaliber": 28139, + "Ġmotivations": 28140, + "ĠÃł": 28141, + "mAh": 28142, + "ĠLanka": 28143, + "handle": 28144, + "Ġpent": 28145, + "ĠRav": 28146, + "ĠAngular": 28147, + "ĠKau": 28148, + "umbing": 28149, + "Ġphilanthrop": 28150, + "Ġdehyd": 28151, + "Ġtoxicity": 28152, + "eer": 28153, + "ĠYORK": 28154, + "witz": 28155, + "å¼": 28156, + "ĠIE": 28157, + "community": 28158, + "ĠAH": 28159, + "Ġretali": 28160, + "Ġmassively": 28161, + "ĠDaniels": 28162, + "ĠDEL": 28163, + "Ġcarcin": 28164, + "Url": 28165, + "Ġrouting": 28166, + "ĠNPCs": 28167, + "ĠRAF": 28168, + "ryce": 28169, + "Ġwaived": 28170, + "ĠGuatem": 28171, + "Everybody": 28172, + "Ġcovenant": 28173, + "Ġ173": 28174, + "Ġrelaxing": 28175, + "Ġquart": 28176, + "almost": 28177, + "Ġguarded": 28178, + "ĠSoldiers": 28179, + "ĠPLAY": 28180, + "Ġoutgoing": 28181, + "LAND": 28182, + "Ġrewrite": 28183, + "ĠMOV": 28184, + "ĠImper": 28185, + "ĠSolution": 28186, + "Ġphenomenal": 28187, + "Ġlongevity": 28188, + "Ġimpat": 28189, + "ĠNissan": 28190, + "irie": 28191, + "Ġodor": 28192, + "ĠZar": 28193, + "oks": 28194, + "Ġmilitias": 28195, + "ĠSPEC": 28196, + "Ġtolerated": 28197, + "arser": 28198, + "ĠBradford": 28199, + "+,": 28200, + "Ġsurreal": 28201, + "sf": 28202, + "Canadian": 28203, + "Ġresemblance": 28204, + "Ġcarbohydrate": 28205, + "VIEW": 28206, + "Ġaccessory": 28207, + "meal": 28208, + "largest": 28209, + "iegel": 28210, + "Someone": 28211, + "Ġtoughest": 28212, + "oso": 28213, + "Ġfunnel": 28214, + "Ġcondemnation": 28215, + "luent": 28216, + "Ġwired": 28217, + "ĠSunset": 28218, + "Jesus": 28219, + "ĠPST": 28220, + "ĠPages": 28221, + "ĠTycoon": 28222, + "ĠPF": 28223, + "Ġselections": 28224, + "Ġà¤": 28225, + "partisan": 28226, + "Ġhighs": 28227, + "ĠRune": 28228, + "Ġcrafts": 28229, + "lead": 28230, + "ĠParents": 28231, + "Ġreclaim": 28232, + "eker": 28233, + "ĠAllied": 28234, + "aeper": 28235, + "Ġlooming": 28236, + "Ġbeneficiaries": 28237, + "ĠHull": 28238, + "Students": 28239, + "Jewish": 28240, + "dj": 28241, + "Ġpact": 28242, + "template": 28243, + "ĠOfficials": 28244, + "ĠBaylor": 28245, + "Ġhemp": 28246, + "Ġyouths": 28247, + "ĠLevels": 28248, + "ĠXiao": 28249, + "ĠChes": 28250, + "Ġendeavor": 28251, + "ĠRemoved": 28252, + "Ġhippocamp": 28253, + "Hell": 28254, + "ãĤĬ": 28255, + "805": 28256, + "Ġdinosaur": 28257, + "ĠWrath": 28258, + "ĠIndonesian": 28259, + "Ġcalculator": 28260, + "ĠDictionary": 28261, + "Ġ420": 28262, + "ĠMAG": 28263, + "(_": 28264, + "!,": 28265, + "tarians": 28266, + "Ġrestricting": 28267, + "racuse": 28268, + "Ġweekday": 28269, + "OUNT": 28270, + "Ġshrugged": 28271, + "leground": 28272, + "Ġbald": 28273, + "ĠDoctors": 28274, + "Ġtouted": 28275, + "ĠMaxwell": 28276, + "Ġ214": 28277, + "Ġdiplomat": 28278, + "Ġrepression": 28279, + "Ġconstituency": 28280, + "vice": 28281, + "ranked": 28282, + "ĠNapoleon": 28283, + "gang": 28284, + "ĠForever": 28285, + "tun": 28286, + "Ġbulb": 28287, + "ĠPDT": 28288, + "ĠCisco": 28289, + "VEN": 28290, + "Ġresumed": 28291, + "Steven": 28292, + "ĠManitoba": 28293, + "Ġfabulous": 28294, + "ĠAgents": 28295, + "1984": 28296, + "Ġamusing": 28297, + "ĠMysteries": 28298, + "Ġorthodox": 28299, + "floor": 28300, + "Ġquestionnaire": 28301, + "Ġpenetrate": 28302, + "Ġfilmmakers": 28303, + "ĠUnc": 28304, + "Ġstamped": 28305, + "Ġthirteen": 28306, + "Ġoutfield": 28307, + "Ġforwarded": 28308, + 
"Ġappra": 28309, + "Ġaided": 28310, + "try": 28311, + "Ġunfocused": 28312, + "ĠLiz": 28313, + "ĠWendy": 28314, + "ĠScene": 28315, + "Charg": 28316, + "Ġrejects": 28317, + "Ġleftist": 28318, + "ĠProvidence": 28319, + "ĠBrid": 28320, + "regn": 28321, + "Ġprophecy": 28322, + "ĠLIVE": 28323, + "499": 28324, + "Ġforge": 28325, + "ĠFML": 28326, + "Ġintrinsic": 28327, + "ĠFrog": 28328, + "Ġwont": 28329, + "ĠHolt": 28330, + "Ġfamed": 28331, + "CLUS": 28332, + "aepernick": 28333, + "ĠHate": 28334, + "ĠCay": 28335, + "Ġregistering": 28336, + "ortality": 28337, + "ropy": 28338, + "ocalyptic": 28339, + "aan": 28340, + "nav": 28341, + "Ġfascist": 28342, + "IFIED": 28343, + "Ġimplicated": 28344, + "ĠResort": 28345, + "ĠChandler": 28346, + "ĠBrick": 28347, + "Pin": 28348, + "ysc": 28349, + "Usage": 28350, + "ĠHelm": 28351, + "usra": 28352, + "âĺħâĺħ": 28353, + "ĠAbbas": 28354, + "Ġunanimously": 28355, + "Ġkeeper": 28356, + "Ġaddicted": 28357, + "???": 28358, + "Ġhelmets": 28359, + "Ġantioxid": 28360, + "apsed": 28361, + "808": 28362, + "giene": 28363, + "Ġwaits": 28364, + "Ġminion": 28365, + "raved": 28366, + "ĠPorsche": 28367, + "Ġdreaming": 28368, + "Ġ171": 28369, + "ĠCain": 28370, + "Ġunfor": 28371, + "asso": 28372, + "ĠConfiguration": 28373, + "kun": 28374, + "hardt": 28375, + "Ġnested": 28376, + "ĠLDS": 28377, + "LES": 28378, + "Ġtying": 28379, + "enos": 28380, + "Ġcue": 28381, + "ĠMarqu": 28382, + "skirts": 28383, + "Ġclicked": 28384, + "Ġexpiration": 28385, + "ĠAccordingly": 28386, + "ĠWC": 28387, + "Ġblessings": 28388, + "Ġaddictive": 28389, + "ĠNarr": 28390, + "yx": 28391, + "ĠJaguars": 28392, + "Ġrents": 28393, + "ĠSiber": 28394, + "Ġtipped": 28395, + "ousse": 28396, + "ĠFitzgerald": 28397, + "Ġhierarch": 28398, + "outine": 28399, + "Ġwavelength": 28400, + ">.": 28401, + "chid": 28402, + "ĠProcessing": 28403, + "/+": 28404, + "ranking": 28405, + "Easy": 28406, + "ĠConstruct": 28407, + "Ġtet": 28408, + "insured": 28409, + "HUD": 28410, + "Ġquoting": 28411, + "Ġcommunicated": 28412, + "inx": 28413, + "Ġinmate": 28414, + "Ġerected": 28415, + "ĠAbsolutely": 28416, + "ĠSurely": 28417, + "Ġunim": 28418, + "ĠThrone": 28419, + "heid": 28420, + "Ġclaws": 28421, + "Ġsuperstar": 28422, + "ĠLenn": 28423, + "ĠWhis": 28424, + "Uk": 28425, + "abol": 28426, + "Ġsket": 28427, + "ĠNiet": 28428, + "Ġperks": 28429, + "Ġaffinity": 28430, + "Ġopenings": 28431, + "phasis": 28432, + "Ġdiscriminate": 28433, + "Tip": 28434, + "vc": 28435, + "Ġgrinding": 28436, + "ĠJenny": 28437, + "Ġasthma": 28438, + "holes": 28439, + "ĠHomer": 28440, + "Ġregisters": 28441, + "ĠGlad": 28442, + "Ġcreations": 28443, + "Ġlithium": 28444, + "Ġapplause": 28445, + "until": 28446, + "Justice": 28447, + "ĠTurks": 28448, + "Ġscandals": 28449, + "Ġbake": 28450, + "tank": 28451, + "Mech": 28452, + "ĠMeans": 28453, + "ĠMaid": 28454, + "Republicans": 28455, + "isal": 28456, + "windows": 28457, + "ĠSantos": 28458, + "Ġvegetation": 28459, + "338": 28460, + "tri": 28461, + "Ġflux": 28462, + "insert": 28463, + "Ġclarified": 28464, + "Ġmortg": 28465, + "ĠChim": 28466, + "ĠTort": 28467, + "Ġdisclaim": 28468, + "metal": 28469, + "ĠAside": 28470, + "Ġinduction": 28471, + "Ġinfl": 28472, + "Ġatheists": 28473, + "amph": 28474, + "Ġether": 28475, + "ĠVital": 28476, + "ĠBuilt": 28477, + "Mind": 28478, + "Ġweaponry": 28479, + "SET": 28480, + "Ġ186": 28481, + "admin": 28482, + "gam": 28483, + "contract": 28484, + "afa": 28485, + "Ġderivatives": 28486, + "Ġsnacks": 28487, + "Ġchurn": 28488, + "Econom": 28489, + "Ġcapped": 28490, + "ĠUnderstanding": 28491, + 
"ĠHers": 28492, + "ĠIz": 28493, + "Ġduct": 28494, + "IENT": 28495, + "aughty": 28496, + "ĠâľĶ": 28497, + "ĠNP": 28498, + "Ġsailing": 28499, + "Initialized": 28500, + "Ġted": 28501, + "Ġreactors": 28502, + "ĠLomb": 28503, + "Ġchoke": 28504, + "ĠWorm": 28505, + "Ġadmiration": 28506, + "Ġswung": 28507, + "ensibly": 28508, + "Ġrash": 28509, + "ĠGoals": 28510, + "ĠImportant": 28511, + "Shot": 28512, + "ĠRas": 28513, + "Ġtrainers": 28514, + "ĠBun": 28515, + "Working": 28516, + "Ġharmed": 28517, + "ĠPandora": 28518, + "ĠLTE": 28519, + "Ġmushroom": 28520, + "ĠCHAR": 28521, + "ĠFee": 28522, + "ĠMoy": 28523, + "Born": 28524, + "oliberal": 28525, + "ĠMartial": 28526, + "Ġgentlemen": 28527, + "Ġlingering": 28528, + "Official": 28529, + "Ġgraffiti": 28530, + "ĠNames": 28531, + "Der": 28532, + "Ġquint": 28533, + "istrate": 28534, + "azeera": 28535, + "ĠNOTICE": 28536, + "ĠFlorence": 28537, + "Ġpayable": 28538, + "Ġdepicts": 28539, + "ĠSpecies": 28540, + "Heart": 28541, + "âĶĢâĶĢâĶĢâĶĢâĶĢâĶĢâĶĢâĶĢ": 28542, + "Ġenclosed": 28543, + "Increases": 28544, + "Daily": 28545, + "ĠLis": 28546, + "Ġenactment": 28547, + "ĠBacon": 28548, + "ĠSteele": 28549, + "demand": 28550, + "Ġ183": 28551, + "Ġmouths": 28552, + "Ġstranded": 28553, + "Ġenhancement": 28554, + "011": 28555, + "ĠWhats": 28556, + "Ġhealed": 28557, + "eny": 28558, + "ĠRab": 28559, + "Ġ340": 28560, + "ĠLabyrinth": 28561, + "roach": 28562, + "ĠYosh": 28563, + "ĠClippers": 28564, + "Ġconcerts": 28565, + "Internet": 28566, + "355": 28567, + "Ġstickers": 28568, + "Ġtermed": 28569, + "ĠAxe": 28570, + "Ġgrandparents": 28571, + "France": 28572, + "ĠClim": 28573, + "ĠUh": 28574, + "ulic": 28575, + "Ġthrill": 28576, + "centric": 28577, + "ĠOverview": 28578, + "ĠConduct": 28579, + "Ġsubstantive": 28580, + "Ġ182": 28581, + "mur": 28582, + "Ġstray": 28583, + "ĠCoff": 28584, + "Ġrepetitive": 28585, + "ĠForgotten": 28586, + "Ġqualification": 28587, + "ewitness": 28588, + "ĠZimbabwe": 28589, + "Ġsimulated": 28590, + "ĠJD": 28591, + "253": 28592, + "ĠWare": 28593, + "Ġunsc": 28594, + "Times": 28595, + "Ġsummons": 28596, + "Ġdisconnected": 28597, + "Ġ184": 28598, + "cius": 28599, + "ĠGujar": 28600, + "odka": 28601, + "Ġerase": 28602, + "ĠTobacco": 28603, + "elected": 28604, + "Ġuncont": 28605, + "ĠShepard": 28606, + "ĠLamp": 28607, + "Ġalerted": 28608, + "Ġoperative": 28609, + "arna": 28610, + "uint": 28611, + "Ġnegligence": 28612, + "acements": 28613, + "Ġsupra": 28614, + "Ġprevail": 28615, + "ĠShark": 28616, + "Ġbelts": 28617, + "ãģ«": 28618, + "Ġtighter": 28619, + "Engineers": 28620, + "Ġinactive": 28621, + "Ġexponent": 28622, + "ĠWillie": 28623, + "aples": 28624, + "Ġheir": 28625, + "ĠHits": 28626, + "iann": 28627, + "ĠSays": 28628, + "Ġcurrents": 28629, + "ĠBengal": 28630, + "Ġarist": 28631, + "Buffer": 28632, + "Ġbreeze": 28633, + "ĠWesley": 28634, + "Cola": 28635, + "Ġpronoun": 28636, + "Ġdeed": 28637, + "ĠKling": 28638, + "Ġoft": 28639, + "Ġinflict": 28640, + "Ġpunishing": 28641, + "Ġnm": 28642, + "iku": 28643, + "ODUCT": 28644, + "014": 28645, + "Ġsubsidy": 28646, + "ĠDEA": 28647, + "ĠHerbert": 28648, + "ĠJal": 28649, + "Bank": 28650, + "Ġdeferred": 28651, + "Ġshipment": 28652, + "Bott": 28653, + "Ġalle": 28654, + "bearing": 28655, + "HTML": 28656, + "Offline": 28657, + "Ġ213": 28658, + "Ġscrolling": 28659, + "Ġscanned": 28660, + "ĠLibyan": 28661, + "ĠTOP": 28662, + "chrom": 28663, + "dt": 28664, + "column": 28665, + "PsyNetMessage": 28666, + "Zero": 28667, + "Ġtorso": 28668, + "050": 28669, + "âķIJ": 28670, + "Ġimperson": 28671, + "ĠSchwartz": 28672, + "udic": 
28673, + "Ġpissed": 28674, + "ĠSapp": 28675, + "257": 28676, + "ĠISPs": 28677, + "ogl": 28678, + "Ġsupervised": 28679, + "Ġadolescent": 28680, + "Ġattained": 28681, + "ĠDelivery": 28682, + "ĠBunny": 28683, + "Ġ1937": 28684, + "Ġminiature": 28685, + "Ġos": 28686, + "Ġ370": 28687, + "608": 28688, + "ĠMourinho": 28689, + "Ġinnate": 28690, + "Ġtempo": 28691, + "ĠNM": 28692, + "ĠFallen": 28693, + "009": 28694, + "Ġprovocative": 28695, + "Streamer": 28696, + "ĠBenedict": 28697, + "ĠBolshe": 28698, + "Ġturtle": 28699, + "ĠPCB": 28700, + "ĠEqual": 28701, + "Director": 28702, + "ĠRend": 28703, + "Ġfluids": 28704, + "Authorities": 28705, + "Ġcousins": 28706, + "requency": 28707, + "ĠNeighbor": 28708, + "sets": 28709, + "shared": 28710, + "Charles": 28711, + "password": 28712, + "Ġgears": 28713, + "Ġ211": 28714, + "ĠHardware": 28715, + "rika": 28716, + "Ġupstream": 28717, + "Hom": 28718, + "Ġdisproportionately": 28719, + "ivities": 28720, + "Ġundefined": 28721, + "Ġelectrons": 28722, + "Ġcommemor": 28723, + "Eventually": 28724, + "Ġ><": 28725, + "Ġirresponsible": 28726, + "218": 28727, + "ĠReleased": 28728, + "ĠOVER": 28729, + "ĠIGN": 28730, + "ĠBread": 28731, + "stellar": 28732, + "ĠSage": 28733, + "tted": 28734, + "damage": 28735, + "edition": 28736, + "ĠPrec": 28737, + "Ġlime": 28738, + "Ġconfinement": 28739, + "Ġcalorie": 28740, + "weapon": 28741, + "Ġdiffering": 28742, + "ĠSina": 28743, + "mys": 28744, + "amd": 28745, + "Ġintricate": 28746, + "kk": 28747, + "ĠPAT": 28748, + "ão": 28749, + "stones": 28750, + "links": 28751, + "Ġranch": 28752, + "Semitic": 28753, + "Ġdifferentiate": 28754, + "ĠSinger": 28755, + "occupied": 28756, + "Ġfortress": 28757, + "cmd": 28758, + "Ġinterception": 28759, + "ĠAnkara": 28760, + "Ġrept": 28761, + "ĠSolitaire": 28762, + "Ġremake": 28763, + "pred": 28764, + "Ġdared": 28765, + "autions": 28766, + "ĠBACK": 28767, + "Running": 28768, + "Ġdebugging": 28769, + "Ġgraphs": 28770, + "399": 28771, + "ĠNigel": 28772, + "Ġbun": 28773, + "Ġpillow": 28774, + "Ġprogressed": 28775, + "fashioned": 28776, + "Ġobedience": 28777, + "ERN": 28778, + "Ġrehears": 28779, + "Cell": 28780, + "tl": 28781, + "Sher": 28782, + "Ġherald": 28783, + "ĠPayment": 28784, + "ĠCory": 28785, + "ĠDept": 28786, + "Ġrepent": 28787, + "ĠWeak": 28788, + "uckland": 28789, + "Ġpleasing": 28790, + "Ġshortages": 28791, + "Ġjurors": 28792, + "ĠKab": 28793, + "qqa": 28794, + "Anti": 28795, + "Ġwow": 28796, + "ĠRCMP": 28797, + "Ġtsun": 28798, + "ĠSic": 28799, + "Ġcomprises": 28800, + "Ġspies": 28801, + "Ġprecinct": 28802, + "nu": 28803, + "Ġurges": 28804, + "Ġtimed": 28805, + "Ġstripes": 28806, + "ĠBoots": 28807, + "Ġyen": 28808, + "Advanced": 28809, + "Ġdiscrete": 28810, + "ĠArchangel": 28811, + "employment": 28812, + "Diff": 28813, + "Ġmonuments": 28814, + "Ġ209": 28815, + "worker": 28816, + "Ġ196": 28817, + "ĠIg": 28818, + "utterstock": 28819, + "TPS": 28820, + "Jac": 28821, + "Ġhomelessness": 28822, + "Ġcommentator": 28823, + "Ġracially": 28824, + "fing": 28825, + "seed": 28826, + "Ele": 28827, + "ellation": 28828, + "Ġethanol": 28829, + "Ġparish": 28830, + "ĠDong": 28831, + "ĠAwakening": 28832, + "Ġdeviation": 28833, + "ĠBearing": 28834, + "ĠTsuk": 28835, + "Ġrecess": 28836, + "Ġlymph": 28837, + "ĠCannabis": 28838, + "åľ": 28839, + "ĠNEWS": 28840, + "Ġdra": 28841, + "ĠStefan": 28842, + "ĠWrong": 28843, + "ĠSAM": 28844, + "Ġloosely": 28845, + "Ġinterpreter": 28846, + "ĠPlain": 28847, + "Government": 28848, + "Ġbigotry": 28849, + "Ġgrenades": 28850, + "avez": 28851, + "pictured": 28852, + "Ġmandated": 28853, + 
"ĠMonk": 28854, + "ĠPedro": 28855, + "Ġlava": 28856, + "274": 28857, + "Ġcynical": 28858, + "ĠScrolls": 28859, + "locks": 28860, + "Mp": 28861, + "Ġcongregation": 28862, + "ornings": 28863, + "phil": 28864, + "ĠIbid": 28865, + "Ġferv": 28866, + "Ġdisappearing": 28867, + "Ġarrogant": 28868, + "syn": 28869, + "ĠMaver": 28870, + "ĠSuit": 28871, + "241": 28872, + "Ġabbre": 28873, + "ackers": 28874, + "Pa": 28875, + "ĠYel": 28876, + "Whenever": 28877, + "Ġ235": 28878, + "ĠVine": 28879, + "ĠAnat": 28880, + "Ġextinct": 28881, + "LET": 28882, + "Ġexecutable": 28883, + "VERS": 28884, + "oxide": 28885, + "DNA": 28886, + "ĠPrel": 28887, + "Ġresentment": 28888, + "Ġcomprise": 28889, + "ĠAviv": 28890, + "Ġinterceptions": 28891, + "Ġprolific": 28892, + "INA": 28893, + "ĠErin": 28894, + "thought": 28895, + "219": 28896, + "ĠPsychiatry": 28897, + "unky": 28898, + "chemist": 28899, + "Ho": 28900, + "ĠMcCoy": 28901, + "Ġbricks": 28902, + "Los": 28903, + "rily": 28904, + "ĠUSSR": 28905, + "Ġrud": 28906, + "Ġlaud": 28907, + "ĠWise": 28908, + "ĠEmerald": 28909, + "Ġrevived": 28910, + "Ġdamned": 28911, + "ĠRepair": 28912, + "idem": 28913, + "ctica": 28914, + "Ġpatriarch": 28915, + "ĠNurs": 28916, + "meg": 28917, + "Ġcheapest": 28918, + "reements": 28919, + "empty": 28920, + "ĠCelebr": 28921, + "Ġdeprivation": 28922, + "chanted": 28923, + "ĠThumbnails": 28924, + "Energy": 28925, + "ĠEthan": 28926, + "ĠQing": 28927, + "Ġopposes": 28928, + "WIND": 28929, + "vik": 28930, + "ĠMau": 28931, + "ĠSUB": 28932, + "667": 28933, + "GRE": 28934, + "ĠVolunte": 28935, + "nton": 28936, + "Cook": 28937, + "åIJ": 28938, + "esque": 28939, + "Ġplummet": 28940, + "Ġsuing": 28941, + "Ġpronounce": 28942, + "Ġresisting": 28943, + "ĠFishing": 28944, + "ĠTrials": 28945, + "Ġyell": 28946, + "Ġ310": 28947, + "Ġinduct": 28948, + "Ġpersonalized": 28949, + "often": 28950, + "Reb": 28951, + "EMBER": 28952, + "Ġviewpoint": 28953, + "Ġexistential": 28954, + "())": 28955, + "remove": 28956, + "MENTS": 28957, + "lasses": 28958, + "Ġevapor": 28959, + "Ġaisle": 28960, + "meta": 28961, + "Ġreflective": 28962, + "Ġentitlement": 28963, + "Ġdevised": 28964, + "music": 28965, + "ascade": 28966, + "Ġwinding": 28967, + "offset": 28968, + "Ġaccessibility": 28969, + "kered": 28970, + "Better": 28971, + "ĠJohnston": 28972, + "thinking": 28973, + "Snow": 28974, + "ĠCroatia": 28975, + "ĠAtomic": 28976, + "271": 28977, + "348": 28978, + "Ġtextbook": 28979, + "ĠSixth": 28980, + "ĠاÙĦ": 28981, + "Ġslider": 28982, + "ĠBurger": 28983, + "bol": 28984, + "Sync": 28985, + "Ġgrandchildren": 28986, + "Ġcerv": 28987, + "+)": 28988, + "Ġeternity": 28989, + "Ġtweeting": 28990, + "Ġspeculative": 28991, + "Ġpivotal": 28992, + "ĠWP": 28993, + "ĠTER": 28994, + "ynamic": 28995, + "Ġupl": 28996, + "ĠCats": 28997, + "perhaps": 28998, + "Ġclassmates": 28999, + "Ġblatant": 29000, + "'-": 29001, + "Ġlakh": 29002, + "antine": 29003, + "ĠBorg": 29004, + "iom": 29005, + "/(": 29006, + "ĠAthletic": 29007, + "Ġsar": 29008, + "OTA": 29009, + "ĠHoffman": 29010, + "Nevertheless": 29011, + "Ġadorable": 29012, + "Ġspawned": 29013, + "Associated": 29014, + "ĠDomestic": 29015, + "Ġimplant": 29016, + "ĠLuxem": 29017, + "ĠKens": 29018, + "Ġpumps": 29019, + "ĠSAT": 29020, + "Attributes": 29021, + "509": 29022, + "avour": 29023, + "Ġcentralized": 29024, + "ĠTN": 29025, + "Ġfreshly": 29026, + "ĠAchieve": 29027, + "Ġoutsiders": 29028, + "herty": 29029, + "ĠRee": 29030, + "ĠTowers": 29031, + "ĠDart": 29032, + "akable": 29033, + "Ġmp": 29034, + "ĠHeavenly": 29035, + "Ġripe": 29036, + "ĠCaroline": 29037, 
+ "ryan": 29038, + "Ġclassics": 29039, + "Ġretiring": 29040, + "Ġ228": 29041, + "Ġah": 29042, + "Ġdealings": 29043, + "Ġpunching": 29044, + "ĠChapman": 29045, + "Options": 29046, + "maxwell": 29047, + "volume": 29048, + "Ġstal": 29049, + "Ġexported": 29050, + "ĠQuite": 29051, + "Ġnumerical": 29052, + "Burn": 29053, + "Fact": 29054, + "ĠKeystone": 29055, + "Ġtrending": 29056, + "Ġaltering": 29057, + "ĠAfricans": 29058, + "478": 29059, + "ĠMN": 29060, + "ĠKnock": 29061, + "Ġtemptation": 29062, + "Ġprestige": 29063, + "Overview": 29064, + "ĠTraditional": 29065, + "ĠBahrain": 29066, + "Private": 29067, + "ĠHOU": 29068, + "Ġbarr": 29069, + "ĠTat": 29070, + "Cube": 29071, + "USD": 29072, + "ĠGrande": 29073, + "ĠGat": 29074, + "ĠFlo": 29075, + "Ġresides": 29076, + "Ġindec": 29077, + "volent": 29078, + "Ġperpetual": 29079, + "ubes": 29080, + "Ġworldview": 29081, + "ĠQuantum": 29082, + "Ġfiltered": 29083, + "Ġensu": 29084, + "orgetown": 29085, + "ERSON": 29086, + "ĠMild": 29087, + "379": 29088, + "OTT": 29089, + "Ã¥": 29090, + "Ġvitamins": 29091, + "Ġribbon": 29092, + "Ġsincerely": 29093, + "ĠHin": 29094, + "Ġeighteen": 29095, + "Ġcontradictory": 29096, + "Ġglaring": 29097, + "Ġexpectancy": 29098, + "Ġconspir": 29099, + "Ġmonstrous": 29100, + "Ġ380": 29101, + "reci": 29102, + "Ġhandic": 29103, + "Ġpumped": 29104, + "Ġindicative": 29105, + "Ġrapp": 29106, + "Ġavail": 29107, + "ĠLEGO": 29108, + "ĠMarijuana": 29109, + "1985": 29110, + "erton": 29111, + "Ġtwentieth": 29112, + "################################": 29113, + "ĠSwamp": 29114, + "Ġvaluation": 29115, + "Ġaffiliates": 29116, + "adjusted": 29117, + "ĠFacility": 29118, + "262": 29119, + "Ġenzymes": 29120, + "itudinal": 29121, + "Ġimprint": 29122, + "Site": 29123, + "Ġinstaller": 29124, + "ĠTRA": 29125, + "mology": 29126, + "linear": 29127, + "ĠCollective": 29128, + "igating": 29129, + "ĠToken": 29130, + "Ġspeculated": 29131, + "KN": 29132, + "ĠCly": 29133, + "ority": 29134, + "Ġdefer": 29135, + "Ġinspectors": 29136, + "approved": 29137, + "RM": 29138, + "ĠSuns": 29139, + "Ġinforming": 29140, + "ĠSyracuse": 29141, + "ibli": 29142, + "765": 29143, + "Ġglove": 29144, + "Ġauthorize": 29145, + "âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦": 29146, + "ĠCruise": 29147, + "Ġcontracting": 29148, + "shell": 29149, + "IFE": 29150, + "ĠJewel": 29151, + "pract": 29152, + "ĠPhotoshop": 29153, + "ĠKnowing": 29154, + "harm": 29155, + "Ġattractions": 29156, + "adan": 29157, + "etus": 29158, + "018": 29159, + "wagen": 29160, + "Alt": 29161, + "Ġmultiply": 29162, + "Ġequilibrium": 29163, + ":{": 29164, + "ĠFighters": 29165, + "ĠEdgar": 29166, + "Ġfourteen": 29167, + "Govern": 29168, + "Ġmisuse": 29169, + "Ġabusing": 29170, + "Ġancestry": 29171, + "ramer": 29172, + "644": 29173, + "Ġworms": 29174, + "Ġthicker": 29175, + "ĠCombine": 29176, + "Ġpeasants": 29177, + "Ġvind": 29178, + "Ġconquest": 29179, + "Ġmocked": 29180, + "Ġcinnamon": 29181, + "ĠCald": 29182, + "ĠGallup": 29183, + "Ġavoidance": 29184, + "Ġincarnation": 29185, + "ĠStrat": 29186, + "Ġtasted": 29187, + "enta": 29188, + "ĠNeal": 29189, + "pared": 29190, + "Ġterminology": 29191, + "jection": 29192, + "Scientists": 29193, + "ĠINS": 29194, + "ĠDee": 29195, + "Ġdirectories": 29196, + "Road": 29197, + "ĠShap": 29198, + "bright": 29199, + "ĠDirectors": 29200, + "ĠColumn": 29201, + "Ġbob": 29202, + "Ġpreferably": 29203, + "Ġglitch": 29204, + "furt": 29205, + "Ġeg": 29206, + "idis": 29207, + "CBC": 29208, + "Ġsurrendered": 29209, + "Ġtestament": 29210, + "336": 29211, + "uggest": 29212, + "ĠNil": 29213, + "another": 29214, + 
"Ġpathetic": 29215, + "ĠDonna": 29216, + "Ġ218": 29217, + "ĠAvery": 29218, + "Ġwhiskey": 29219, + "Ġfixture": 29220, + "ĠConquest": 29221, + "Ġbets": 29222, + "Occ": 29223, + "ĠLeicester": 29224, + "].\"": 29225, + "Ġ));": 29226, + "Ġflashes": 29227, + "456": 29228, + "Ġmasked": 29229, + "gebra": 29230, + "Ġcomputed": 29231, + "chel": 29232, + "auder": 29233, + "Ġdefeats": 29234, + "ĠLiberation": 29235, + "ĠOsama": 29236, + "ĠVive": 29237, + "Changes": 29238, + "Channel": 29239, + "Ġtariffs": 29240, + "Ġmage": 29241, + "ĠSax": 29242, + "Ġinadvertently": 29243, + "ĠCRE": 29244, + "ĠReaper": 29245, + "inky": 29246, + "grading": 29247, + "Ġstereotyp": 29248, + "Ġcurl": 29249, + "ĠFANT": 29250, + "Ġframeworks": 29251, + "Mom": 29252, + "ĠAnch": 29253, + "Ġflavour": 29254, + "carbon": 29255, + "Ġpermitting": 29256, + "letcher": 29257, + "ĠMozilla": 29258, + "ĠParking": 29259, + "ĠChamp": 29260, + "Scroll": 29261, + "Ġmurderer": 29262, + "Ġrested": 29263, + "Ġowes": 29264, + "ĠPoss": 29265, + "ADD": 29266, + "IFF": 29267, + "resolution": 29268, + "ĠMining": 29269, + "Ġcomparative": 29270, + "Dim": 29271, + "Ġneighbouring": 29272, + "ĠAST": 29273, + "ĠToxic": 29274, + "Ġbiases": 29275, + "Ġgunfire": 29276, + "urous": 29277, + "ĠMoment": 29278, + "1983": 29279, + "Ġpervasive": 29280, + "ttp": 29281, + "ĠNormally": 29282, + "rir": 29283, + "Sarah": 29284, + "ĠAlbany": 29285, + "Ġunsett": 29286, + "ĠSMS": 29287, + "ipers": 29288, + "layer": 29289, + "ĠWhites": 29290, + "uple": 29291, + "Ġturbo": 29292, + "ĠLeeds": 29293, + "Ġthats": 29294, + "ĠMiner": 29295, + "MER": 29296, + "ĠReign": 29297, + "Ġperme": 29298, + "ĠBlitz": 29299, + "Ġ1934": 29300, + "Ġintimidating": 29301, + "tube": 29302, + "Ġeccentric": 29303, + "abolic": 29304, + "boxes": 29305, + "ĠAssociates": 29306, + "votes": 29307, + "Ġsimulate": 29308, + "umbo": 29309, + "astery": 29310, + "Ġshipments": 29311, + "FFFF": 29312, + "anth": 29313, + "Ġseasoned": 29314, + "Ġexperimentation": 29315, + "âĸł": 29316, + "laws": 29317, + "Meet": 29318, + "iddles": 29319, + "antics": 29320, + "Rating": 29321, + "ISIS": 29322, + "hift": 29323, + "Ġfronts": 29324, + "buf": 29325, + "017": 29326, + "Ġunatt": 29327, + "ĠDil": 29328, + "leases": 29329, + "ĠGardens": 29330, + "777": 29331, + "touch": 29332, + "vell": 29333, + "458": 29334, + "Ġ=====": 29335, + "saving": 29336, + "Ġerosion": 29337, + "ĠQuin": 29338, + "Ġearns": 29339, + "Ġaccomplishment": 29340, + "ĠWei": 29341, + "Ġ<[": 29342, + "_____": 29343, + "Ġirrig": 29344, + "ĠTeddy": 29345, + "Ġconquered": 29346, + "ĠArmored": 29347, + "Ġasserts": 29348, + "Ġmanipulating": 29349, + "ré": 29350, + "Ġtranscripts": 29351, + "Gallery": 29352, + "Ġplotting": 29353, + "Neil": 29354, + "Ġbetrayal": 29355, + "loader": 29356, + "ĠSul": 29357, + "Ġdisplacement": 29358, + "Ġroyalty": 29359, + "ĠWI": 29360, + "heit": 29361, + "ĠDevices": 29362, + "allel": 29363, + "Ġmunicipalities": 29364, + "Ġcanal": 29365, + "Stars": 29366, + "ĠUAE": 29367, + "Ġ\"âĢ¦": 29368, + "ĠCU": 29369, + "above": 29370, + "Ġresonance": 29371, + "ĠguiActiveUn": 29372, + "added": 29373, + "ĠBraves": 29374, + "ĠIbn": 29375, + "Ġhereby": 29376, + "ĠBRE": 29377, + "Ġshareholder": 29378, + "ĠHir": 29379, + "ĠJi": 29380, + "Ġstrangely": 29381, + "Ġadmired": 29382, + "Ġplight": 29383, + "Ġbachelor": 29384, + "ĠPole": 29385, + "ciplinary": 29386, + "Tony": 29387, + "ĠArmenian": 29388, + "Ġunman": 29389, + "ĠZionist": 29390, + "Stage": 29391, + "iscover": 29392, + "Ġautomotive": 29393, + "Ġsidelines": 29394, + "Ġslick": 29395, + "ĠRenaissance": 
29396, + "ĠFUN": 29397, + "Images": 29398, + "ĠHaj": 29399, + "Ġping": 29400, + "Ġshortcut": 29401, + "ĠBlvd": 29402, + "ĠLooks": 29403, + "Ġbursts": 29404, + "Ġclamp": 29405, + "Ġmish": 29406, + "Ġsorting": 29407, + "Ġpatriot": 29408, + "Ġcorrectness": 29409, + "ĠScandinav": 29410, + "ĠCavaliers": 29411, + "python": 29412, + "azar": 29413, + "Ġ375": 29414, + "ĠJaune": 29415, + "409": 29416, + "Ġdetrimental": 29417, + "Ġstabbing": 29418, + "Ġpoisoned": 29419, + "Ġfountain": 29420, + "ocent": 29421, + "orst": 29422, + "ĠMari": 29423, + "Ġrains": 29424, + "ĠOvers": 29425, + "ĠInstitution": 29426, + "udget": 29427, + "AMY": 29428, + "tale": 29429, + "ĠKR": 29430, + "ĠPrices": 29431, + "Ġheadaches": 29432, + "Ġlandsl": 29433, + "ĠAura": 29434, + "Bonus": 29435, + "ĠZhao": 29436, + "ĠHip": 29437, + "Ġhops": 29438, + "ĠKurdistan": 29439, + "Ġexploiting": 29440, + "ryn": 29441, + "Ġhypocrisy": 29442, + "opening": 29443, + "Ġgunshot": 29444, + "Ġwed": 29445, + "interstitial": 29446, + "Interstitial": 29447, + "Ġamen": 29448, + "Breaking": 29449, + "Ġmarketed": 29450, + "Wire": 29451, + "ĠCrowd": 29452, + "Continue": 29453, + "ĠKnown": 29454, + "ĠEffective": 29455, + "orean": 29456, + "izons": 29457, + "Joseph": 29458, + "Ġescalation": 29459, + "username": 29460, + "Ġcurtain": 29461, + "ATES": 29462, + "ĠPAR": 29463, + "ĠMiy": 29464, + "Ġcounterfe": 29465, + "lene": 29466, + "Ġcontenders": 29467, + "daily": 29468, + "ĠAsc": 29469, + "ĠPhillip": 29470, + "mostly": 29471, + "Ġfilename": 29472, + "hene": 29473, + "Ġresembling": 29474, + "Ġstaging": 29475, + "ĠChloe": 29476, + "Ġwiring": 29477, + "Hon": 29478, + "ĠRenew": 29479, + "ottage": 29480, + "ĠHybrid": 29481, + "much": 29482, + "Ġstrokes": 29483, + "Ġpolicymakers": 29484, + "APTER": 29485, + "ĠArkham": 29486, + "plot": 29487, + "Ġassistants": 29488, + "Ġdeport": 29489, + "ĠSega": 29490, + "Ġinfluenza": 29491, + "ĠCursed": 29492, + "ĠKobe": 29493, + "Ġskinny": 29494, + "Provider": 29495, + "ĠRip": 29496, + "Ġincremental": 29497, + "products": 29498, + "BF": 29499, + "Ġdome": 29500, + "ĠCredits": 29501, + "Ġlosers": 29502, + "ints": 29503, + "ĠBetty": 29504, + "ĠTalent": 29505, + "ĠDAM": 29506, + "Lv": 29507, + "Ess": 29508, + "Ġdens": 29509, + "temp": 29510, + "Judge": 29511, + "odic": 29512, + "Ġ'(": 29513, + "URES": 29514, + "etsk": 29515, + "VO": 29516, + "Ġretrieved": 29517, + "Ġarchitects": 29518, + "Ùĩ": 29519, + "Ġethic": 29520, + "ĠSecondary": 29521, + "stocks": 29522, + "adia": 29523, + "Ġ325": 29524, + "ĠOpinion": 29525, + "Ġsimultaneous": 29526, + "Ġdizz": 29527, + "ulp": 29528, + "Ġsmuggling": 29529, + "ippery": 29530, + "Random": 29531, + "facing": 29532, + "ĠDas": 29533, + "Ġstockp": 29534, + "Ġdisclosures": 29535, + "pointer": 29536, + "Ġcoral": 29537, + "ĠSelection": 29538, + "ĠPike": 29539, + "ivalent": 29540, + "Ġruthless": 29541, + "ĠRim": 29542, + "Ġensuing": 29543, + "ĠExperiment": 29544, + "Ġcongressman": 29545, + "Ġbeliever": 29546, + "Ġunspecified": 29547, + "ĠMord": 29548, + "Ġknowledgeable": 29549, + "ĠVERY": 29550, + "TX": 29551, + "Ġstraps": 29552, + "Ġturf": 29553, + "apeshifter": 29554, + "Ġmarital": 29555, + "Ġflock": 29556, + "ãģĨ": 29557, + "263": 29558, + "AMES": 29559, + "ĠOpposition": 29560, + "Ġtreasures": 29561, + "ĠGOD": 29562, + "Ġmodeled": 29563, + "ĠWORLD": 29564, + "Ġ([": 29565, + "ĠUsage": 29566, + "HF": 29567, + "Ġ$(": 29568, + "ussed": 29569, + "Ġpioneer": 29570, + "Eight": 29571, + "parse": 29572, + "bread": 29573, + "ritz": 29574, + "ĠMiranda": 29575, + "ĠKant": 29576, + "++)": 29577, + "oren": 
29578, + "Ġprovoked": 29579, + "Ġbreeds": 29580, + "ĠIncludes": 29581, + "ĠPastebin": 29582, + "ĠFlip": 29583, + "Java": 29584, + "Ġbrink": 29585, + "Ġrumored": 29586, + "Ġunseen": 29587, + "Ġgarnered": 29588, + "ĠDefin": 29589, + "alted": 29590, + "Ġtattoos": 29591, + "Ġhesitation": 29592, + "isitions": 29593, + "ĠWeaver": 29594, + "ĠReporting": 29595, + "Ġtherapies": 29596, + "Ġconsultants": 29597, + "Ġresidual": 29598, + "ĠMali": 29599, + "ĠRoma": 29600, + "iago": 29601, + "ĠResidents": 29602, + "ubi": 29603, + "Ġremedies": 29604, + "Ġadaptive": 29605, + "ĠAlive": 29606, + "ĠBarcl": 29607, + "Ġwallets": 29608, + "crypt": 29609, + "etermination": 29610, + "ĠPelosi": 29611, + "Ġslipping": 29612, + "otonin": 29613, + "Ġalliances": 29614, + "patrick": 29615, + "iris": 29616, + "Ġorth": 29617, + "ĠPerkins": 29618, + "ĠDeV": 29619, + "ĠGets": 29620, + "Ġdrying": 29621, + "gee": 29622, + "forest": 29623, + "ĠForget": 29624, + "orem": 29625, + "339": 29626, + "Ġvaguely": 29627, + "ĠDion": 29628, + "ĠPorn": 29629, + "ĠHOW": 29630, + "Ġpneum": 29631, + "Ġrubble": 29632, + "ĠTaste": 29633, + "encia": 29634, + "ĠGel": 29635, + "Ġdst": 29636, + "Ġ245": 29637, + "ĠMorocco": 29638, + "inflamm": 29639, + "ĠTwins": 29640, + "Ġbots": 29641, + "daughter": 29642, + "ĠBalk": 29643, + "Ġbrethren": 29644, + "Ġlogos": 29645, + "Ġgobl": 29646, + "fps": 29647, + "Ġsubdivision": 29648, + "Ġpawn": 29649, + "Ġsqueezed": 29650, + "Ġmorale": 29651, + "ĠDW": 29652, + "'\"": 29653, + "Ġknot": 29654, + "ooky": 29655, + "Ġdivisive": 29656, + "Ġboosted": 29657, + "chy": 29658, + "ãĥIJ": 29659, + "ifact": 29660, + "Ġnewcomers": 29661, + "ĠWrestling": 29662, + "Ġscouts": 29663, + "wolves": 29664, + "Rat": 29665, + "Ġnineteenth": 29666, + "ĠOsborne": 29667, + "Stats": 29668, + "Ġempowered": 29669, + "Ġpsychopath": 29670, + "ĠOEM": 29671, + "uggage": 29672, + "ĠPK": 29673, + "ĠMohammad": 29674, + "Pak": 29675, + "Ġanarchists": 29676, + "ĠExtract": 29677, + "esthes": 29678, + "ĠStockholm": 29679, + "loo": 29680, + "ĠGraph": 29681, + "Ġdeploying": 29682, + "ĠStranger": 29683, + "ĠMold": 29684, + "Ġstaffer": 29685, + "Ġdiscounted": 29686, + "uckle": 29687, + "please": 29688, + "ĠLanding": 29689, + "ÃŃa": 29690, + "Ġ193": 29691, + "Ġante": 29692, + "Ġrepetition": 29693, + "Ġ+/-": 29694, + "Ġparody": 29695, + "Ġlively": 29696, + "AAA": 29697, + "ĠHorus": 29698, + "Ġpits": 29699, + "inders": 29700, + "LOC": 29701, + "ĠVenice": 29702, + "406": 29703, + "ĠDiscover": 29704, + "âĨ": 29705, + "ellectual": 29706, + "Ġpens": 29707, + "Ġeyel": 29708, + "iguous": 29709, + "Impl": 29710, + "Ġjoking": 29711, + "Ġinval": 29712, + "ĠBelfast": 29713, + "Ġcreditors": 29714, + "ĠSkywalker": 29715, + "ovsky": 29716, + "Ġceasefire": 29717, + "Ġseals": 29718, + "isoft": 29719, + ")).": 29720, + "ĠFelix": 29721, + "ITS": 29722, + "Ġtresp": 29723, + "ĠBlockchain": 29724, + "eware": 29725, + "ĠSchwar": 29726, + "enne": 29727, + "mounted": 29728, + "ĠBeacon": 29729, + "lesh": 29730, + "Ġimmensely": 29731, + "Ġcheering": 29732, + "Employ": 29733, + "scene": 29734, + "ishly": 29735, + "atchewan": 29736, + "ĠNicolas": 29737, + "Ġdrained": 29738, + "ĠExit": 29739, + "ĠAzerb": 29740, + "jun": 29741, + "Ġfloated": 29742, + "uania": 29743, + "Deep": 29744, + "Ġsuperv": 29745, + "Ġmystical": 29746, + "ĠDollar": 29747, + "ĠApostle": 29748, + "ĠREL": 29749, + "ĠProvided": 29750, + "ĠBucks": 29751, + "ãĥ´": 29752, + "cutting": 29753, + "Ġenhancements": 29754, + "ĠPenguins": 29755, + "ĠIsaiah": 29756, + "Ġjerk": 29757, + "ĠWyn": 29758, + "Ġstalled": 29759, + 
"Ġcryptocurrencies": 29760, + "ĠRoland": 29761, + "single": 29762, + "Ġlumin": 29763, + "ĠFellow": 29764, + "ĠCapacity": 29765, + "ĠKazakh": 29766, + "WN": 29767, + "Ġfinanced": 29768, + "389": 29769, + "Ġtid": 29770, + "Ġcollusion": 29771, + "ĠMyr": 29772, + "îĢ": 29773, + "Senator": 29774, + "Ġpediatric": 29775, + "Ġneatly": 29776, + "Ġsandwiches": 29777, + "ĠArchitecture": 29778, + "Ġtucked": 29779, + "Ġbalcony": 29780, + "Ġearthquakes": 29781, + "quire": 29782, + "Future": 29783, + "Ġhefty": 29784, + "éĹ": 29785, + "Ġspecializes": 29786, + "Ġstresses": 29787, + "Ġsender": 29788, + "Ġmisunderstanding": 29789, + "Ġepile": 29790, + "Ġprovoke": 29791, + "ĠColors": 29792, + "Ġdismay": 29793, + "uko": 29794, + "[_": 29795, + "586": 29796, + "neutral": 29797, + "Ġdonating": 29798, + "ĠRandall": 29799, + "Multi": 29800, + "Ġconveniently": 29801, + "ĠSung": 29802, + "ĠCoca": 29803, + "Ġtents": 29804, + "ĠAcceler": 29805, + "Ġpartnered": 29806, + "272": 29807, + "irming": 29808, + "ĠBAS": 29809, + "sometimes": 29810, + "Ġobjected": 29811, + "ubric": 29812, + "posed": 29813, + "LCS": 29814, + "grass": 29815, + "Ġattributable": 29816, + "VIS": 29817, + "Israeli": 29818, + "Ġrepeats": 29819, + "ĠRM": 29820, + "vag": 29821, + "uta": 29822, + "inous": 29823, + "Ġinert": 29824, + "ĠMiguel": 29825, + "æŃ": 29826, + "ĠHawaiian": 29827, + "Board": 29828, + "Ġartific": 29829, + "ĠAzerbai": 29830, + "asio": 29831, + "ĠRent": 29832, + "AIN": 29833, + "Ġappliances": 29834, + "Ġnationality": 29835, + "Ġasshole": 29836, + "ĠNeb": 29837, + "Ġnotch": 29838, + "hani": 29839, + "ĠBride": 29840, + "Availability": 29841, + "Ġintercepted": 29842, + "Ġcontinental": 29843, + "Ġswelling": 29844, + "ĠPerspect": 29845, + "bies": 29846, + ".<": 29847, + "ithmetic": 29848, + "ĠLara": 29849, + "Ġtempting": 29850, + "addr": 29851, + "Ġoverseeing": 29852, + "clad": 29853, + "ĠDV": 29854, + "ĠGingrich": 29855, + "Ġmun": 29856, + "ĠAppropri": 29857, + "Ġalterations": 29858, + "ĠPatreon": 29859, + "Ġhavoc": 29860, + "Ġdisciplines": 29861, + "Ġnotoriously": 29862, + "akuya": 29863, + "ieri": 29864, + "?).": 29865, + "ĠWent": 29866, + "Ġsilicon": 29867, + "Ġtremb": 29868, + "Container": 29869, + "Known": 29870, + "Ġmortar": 29871, + "este": 29872, + "icka": 29873, + "Arthur": 29874, + "ĠPreviously": 29875, + "ĠMarty": 29876, + "Ġsparse": 29877, + "gins": 29878, + "Ġinward": 29879, + "ĠParticipant": 29880, + "Copy": 29881, + "ĠMisc": 29882, + "Ġantibiotic": 29883, + "ĠRetro": 29884, + "Ġelusive": 29885, + "Ġassail": 29886, + "ĠBattalion": 29887, + "ĠBought": 29888, + "Ġdiminish": 29889, + "ĠEuropa": 29890, + "session": 29891, + "ĠDangerous": 29892, + "iesel": 29893, + "Ġdisbelief": 29894, + "Ġblasts": 29895, + "extreme": 29896, + "ĠBoyd": 29897, + "ĠProjects": 29898, + "ĠGuys": 29899, + "Ġundergone": 29900, + "Ġgrill": 29901, + "ĠDwight": 29902, + "Ġ197": 29903, + "USER": 29904, + "Ġfilesystem": 29905, + "Ġclocks": 29906, + "Taylor": 29907, + "Ġwrapper": 29908, + "Ġfolding": 29909, + "ousand": 29910, + "ĠPhilippine": 29911, + "ATIONAL": 29912, + "ĠPerth": 29913, + "Ġashes": 29914, + "Ġaccumulate": 29915, + "ĠGateway": 29916, + "Shop": 29917, + "orkshire": 29918, + "Han": 29919, + "ĠBarrel": 29920, + "ĠLeh": 29921, + "ĠXV": 29922, + "Ġwhim": 29923, + "Ġrepo": 29924, + "ĠCG": 29925, + "ĠMam": 29926, + "Ġincorporating": 29927, + "Ġbailout": 29928, + "Ġlinguistic": 29929, + "Ġdisinteg": 29930, + "CLE": 29931, + "Ġcinematic": 29932, + "ĠFiber": 29933, + "Syn": 29934, + "ilion": 29935, + "ĠCompos": 29936, + "chens": 29937, + "Ġneoc": 
29938, + "Ġboiled": 29939, + "FINE": 29940, + "ono": 29941, + "uncle": 29942, + "iken": 29943, + "ĠBM": 29944, + "ι": 29945, + "Ġreceipts": 29946, + "Ġdisposed": 29947, + "ĠThirty": 29948, + "ĠRough": 29949, + "ĠABS": 29950, + "Ġnotwithstanding": 29951, + "ollen": 29952, + "#$": 29953, + "Ġunreliable": 29954, + "Ġbloom": 29955, + "Ġmediocre": 29956, + "Ġtram": 29957, + "ĠTasman": 29958, + "Ġshakes": 29959, + "Ġmanifesto": 29960, + "ĠMW": 29961, + "Ġsatisfactory": 29962, + "Ġshores": 29963, + "Ġcomputation": 29964, + "Ġassertions": 29965, + "ormons": 29966, + "arag": 29967, + "abit": 29968, + "Democrats": 29969, + "ĠLoot": 29970, + "ĠVolks": 29971, + "haired": 29972, + "Ġgravitational": 29973, + "Sing": 29974, + "ĠMiz": 29975, + "Ġthrottle": 29976, + "Ġtyranny": 29977, + "ĠViews": 29978, + "Ġrobber": 29979, + "ĠMinority": 29980, + "Ġshrine": 29981, + "scope": 29982, + "purpose": 29983, + "Ġnucleus": 29984, + "ourcing": 29985, + "ĠUSDA": 29986, + "ĠDHS": 29987, + "wra": 29988, + "ĠBowie": 29989, + "Scale": 29990, + "ĠBEL": 29991, + "xi": 29992, + "Iter": 29993, + "Ġ(),": 29994, + "wright": 29995, + "Ġsailors": 29996, + "oused": 29997, + "NASA": 29998, + "ĠProof": 29999, + "ĠMineral": 30000, + "token": 30001, + "ĠFD": 30002, + "Rew": 30003, + "Ġell": 30004, + "630": 30005, + "Ġchancellor": 30006, + "ĠGos": 30007, + "Ġamounted": 30008, + "ĠRecre": 30009, + "omez": 30010, + "ĠOptim": 30011, + "ĠOlive": 30012, + "Ġtracker": 30013, + "owler": 30014, + "ĠUnique": 30015, + "Root": 30016, + "Ġmaritime": 30017, + "ĠQuran": 30018, + "ĠAdapt": 30019, + "Ġecosystems": 30020, + "ĠRepeat": 30021, + "ĠSoy": 30022, + "ĠIMP": 30023, + "Ġgraduating": 30024, + "andem": 30025, + "Pur": 30026, + "ĠReset": 30027, + "ĠTrick": 30028, + "ĠPhilly": 30029, + "ĠTue": 30030, + "ĠMalaysian": 30031, + "Ġclimax": 30032, + "Ġbury": 30033, + "Ġconspic": 30034, + "ĠSouthampton": 30035, + "ĠFlowers": 30036, + "Ġescorted": 30037, + "ĠEducational": 30038, + "ĠIRC": 30039, + "Ġbrutally": 30040, + "eating": 30041, + "Ġpillar": 30042, + "ĠSang": 30043, + "ĠJude": 30044, + "arling": 30045, + "ĠAmnesty": 30046, + "Ġreminding": 30047, + "ĠAdministrative": 30048, + "hesda": 30049, + "Ġflashed": 30050, + "ĠPBS": 30051, + "perate": 30052, + "feature": 30053, + "Ġswipe": 30054, + "Ġgraves": 30055, + "oultry": 30056, + "261": 30057, + "breaks": 30058, + "ĠGuer": 30059, + "Ġshrimp": 30060, + "ĠVoting": 30061, + "quist": 30062, + "Ġanalytical": 30063, + "Ġtablespoons": 30064, + "ĠSOU": 30065, + "Ġresearched": 30066, + "Ġdisrupted": 30067, + "Ġjour": 30068, + "Ġreplica": 30069, + "Ġcartoons": 30070, + "bians": 30071, + "})": 30072, + "copy": 30073, + "Got": 30074, + "ouched": 30075, + "PUT": 30076, + "Ġswarm": 30077, + "notations": 30078, + "said": 30079, + "Ġrebuilt": 30080, + "Ġcollaborate": 30081, + "Ġraging": 30082, + "Ġnar": 30083, + "Ġdemographics": 30084, + "ĠDDR": 30085, + "Ġdistrust": 30086, + "ossier": 30087, + "ĠKro": 30088, + "Ġpumpkin": 30089, + "Ġregrets": 30090, + "Ġfatalities": 30091, + "ĠLens": 30092, + "ĠOle": 30093, + "pd": 30094, + "Ġpuppet": 30095, + "ĠOutlook": 30096, + "ĠStam": 30097, + "Ol": 30098, + "Fair": 30099, + "UU": 30100, + "Ġrewritten": 30101, + "ı": 30102, + "Ġfascinated": 30103, + "Ġvectors": 30104, + "Ġtribunal": 30105, + "uay": 30106, + "ĠMats": 30107, + "ĠCoins": 30108, + "[[": 30109, + "Ġ181": 30110, + "Ġrenders": 30111, + "ĠKaepernick": 30112, + "Ġespionage": 30113, + "Ġsumm": 30114, + "Ġditch": 30115, + "Account": 30116, + "Ġspreadsheet": 30117, + "Ġmutant": 30118, + "past": 30119, + "407": 30120, + 
"Ġdye": 30121, + "Ġinitiation": 30122, + "Ġ4000": 30123, + "Ġpunishable": 30124, + "Ġthinner": 30125, + "ĠKhal": 30126, + "Ġintermedi": 30127, + "Dun": 30128, + "ĠGotham": 30129, + "Ġeagerly": 30130, + "Ġvaginal": 30131, + "powers": 30132, + "VW": 30133, + "ĠWATCHED": 30134, + "Ġpredator": 30135, + "amsung": 30136, + "Ġdisparity": 30137, + "Ġ[*": 30138, + "Ġamph": 30139, + "Ġoutskirts": 30140, + "ĠSpirits": 30141, + "Ġskeletal": 30142, + "л": 30143, + "ĠRear": 30144, + "Ġissuance": 30145, + "ĠLogic": 30146, + "released": 30147, + "ZZ": 30148, + "ĠBound": 30149, + "Entry": 30150, + "Ġexits": 30151, + "isol": 30152, + "ĠFounder": 30153, + "Ġwre": 30154, + "ĠGreenland": 30155, + "ĠMMO": 30156, + "taker": 30157, + "INC": 30158, + "ãģ¾": 30159, + "Ġhourly": 30160, + "henko": 30161, + "Ġfantasies": 30162, + "Ġdisob": 30163, + "Ġdemolition": 30164, + "ãĥĭ": 30165, + "Ġenlisted": 30166, + "ratulations": 30167, + "Ġmisguided": 30168, + "Ġensured": 30169, + "Ġdiscouraged": 30170, + "mort": 30171, + "Ġflank": 30172, + "Ġcess": 30173, + "Ġreacts": 30174, + "ĠSere": 30175, + "sensitive": 30176, + "ĠSerpent": 30177, + "assad": 30178, + "Ġ247": 30179, + "Ġcalmly": 30180, + "busters": 30181, + "Ġbleed": 30182, + "ĠStro": 30183, + "Ġamusement": 30184, + "ĠAntarctica": 30185, + "Ġscept": 30186, + "ĠGaw": 30187, + "aq": 30188, + "asonic": 30189, + "Ġsprawling": 30190, + "native": 30191, + "aturated": 30192, + "ĠBattlefield": 30193, + "IVERS": 30194, + "EB": 30195, + "ĠGems": 30196, + "ĠNorthwestern": 30197, + "ĠFilms": 30198, + "ĠAutomatic": 30199, + "Ġapprehend": 30200, + "ãģ¨": 30201, + "ĠguiName": 30202, + "Ġbackend": 30203, + "Ġevidenced": 30204, + "geant": 30205, + "012": 30206, + "ĠSiege": 30207, + "ĠexternalTo": 30208, + "ĠunfocusedRange": 30209, + "ĠguiActiveUnfocused": 30210, + "ĠguiIcon": 30211, + "ĠexternalToEVA": 30212, + "ĠexternalToEVAOnly": 30213, + "Fri": 30214, + "chard": 30215, + "enaries": 30216, + "Ġchiefs": 30217, + "Ġcf": 30218, + "ĠHUD": 30219, + "Ġcorrobor": 30220, + "ĠdB": 30221, + "ĠTaken": 30222, + "ĠPatricia": 30223, + "rail": 30224, + "ĠCharm": 30225, + "ĠLibertarian": 30226, + "rieve": 30227, + "Personal": 30228, + "ĠOUR": 30229, + "geries": 30230, + "Ġdumping": 30231, + "Ġneurological": 30232, + "itimate": 30233, + "ĠClintons": 30234, + "rafted": 30235, + "ĠMolly": 30236, + "Ġterminals": 30237, + "register": 30238, + "Ġflare": 30239, + "Ġencoded": 30240, + "Ġautopsy": 30241, + "pel": 30242, + "machine": 30243, + "Ġexemptions": 30244, + "ĠRoyals": 30245, + "distance": 30246, + "Ġdrafts": 30247, + "Ġlame": 30248, + "ĠCunning": 30249, + "Ġspouses": 30250, + "ĠMarkets": 30251, + "ĠCarrier": 30252, + "Ġimplying": 30253, + "ĠYak": 30254, + "sid": 30255, + "Ġloser": 30256, + "Ġvigilant": 30257, + "Ġimpeachment": 30258, + "Ġaugmented": 30259, + "ĠEmployees": 30260, + "Ġunintended": 30261, + "ternally": 30262, + "ĠWatt": 30263, + "Ġrecognizable": 30264, + "essim": 30265, + "æĿ": 30266, + "Ġcoated": 30267, + "rha": 30268, + "Ġlieutenant": 30269, + "ĠLegislation": 30270, + "published": 30271, + "444": 30272, + "013": 30273, + "Ġideally": 30274, + "ĠPassword": 30275, + "Ġsimplify": 30276, + "ĠMeta": 30277, + "ĠMRI": 30278, + "Ġpleading": 30279, + "organized": 30280, + "handler": 30281, + "Ġunravel": 30282, + "correct": 30283, + "Ġicy": 30284, + "Ġparanoid": 30285, + "Ġpasser": 30286, + "Ġinspections": 30287, + "ofer": 30288, + "ĠHealthcare": 30289, + "283": 30290, + "ĠBrut": 30291, + "iola": 30292, + "forge": 30293, + "ĠMedieval": 30294, + "MSN": 30295, + "ievers": 30296, + "ĠProgramming": 
30297, + "åī": 30298, + "Ġ223": 30299, + "mu": 30300, + "ĠCLE": 30301, + "uga": 30302, + "Ġshoppers": 30303, + "Ġinformative": 30304, + "ĠPlans": 30305, + "Ġsupplementation": 30306, + "ĠTests": 30307, + "tyard": 30308, + "ocytes": 30309, + "ĠVega": 30310, + "ĠGujarat": 30311, + "ermanent": 30312, + "Except": 30313, + "ĠLOT": 30314, + "alla": 30315, + "ĠCumm": 30316, + "ĠOsw": 30317, + "Ġvenom": 30318, + "ĠDebt": 30319, + "ĠDOWN": 30320, + "Ġreunion": 30321, + "Ġmuc": 30322, + "ĠRelief": 30323, + "Ġgeop": 30324, + "ĠðŁĺ": 30325, + "alogue": 30326, + "Anth": 30327, + "echo": 30328, + "Ġcorros": 30329, + "Ġreplication": 30330, + "ĠBlazing": 30331, + "ĠDaughter": 30332, + "Ġinflic": 30333, + "ĠLindsey": 30334, + "ÙĪ": 30335, + "284": 30336, + "Exit": 30337, + "Ġgloom": 30338, + "TAIN": 30339, + "Ġundermining": 30340, + "Ġadvising": 30341, + "hidden": 30342, + "Ġoverflow": 30343, + "Ġgor": 30344, + "urdue": 30345, + "Ġechoes": 30346, + "enhagen": 30347, + "Ġimpuls": 30348, + "drug": 30349, + "cash": 30350, + "Ġasync": 30351, + "Ġmirac": 30352, + "atts": 30353, + "punk": 30354, + "Ġpivot": 30355, + "ĠLegislative": 30356, + "Ġbloggers": 30357, + "ĠClaw": 30358, + "sburg": 30359, + "dyl": 30360, + "ĠRecommend": 30361, + "Ġverte": 30362, + "Ġprohibiting": 30363, + "ĠPanther": 30364, + "Jonathan": 30365, + "Ġomin": 30366, + "Ġhateful": 30367, + "281": 30368, + "ĠOrche": 30369, + "ĠMurdoch": 30370, + "downs": 30371, + "Ġasymm": 30372, + "GER": 30373, + "Always": 30374, + "Ġinforms": 30375, + "ĠWM": 30376, + "ĠPony": 30377, + "ĠAppendix": 30378, + "ĠArlington": 30379, + "Jam": 30380, + "Ġmedicinal": 30381, + "ĠSlam": 30382, + "ITIES": 30383, + "Ġreaff": 30384, + "ĠRi": 30385, + "FG": 30386, + "Spring": 30387, + "bool": 30388, + "Ġthighs": 30389, + "Ġmarkings": 30390, + "ĠRaqqa": 30391, + "ĠLak": 30392, + "poll": 30393, + "tsky": 30394, + "ĠMorty": 30395, + "ĠDefinition": 30396, + "Ġdebunk": 30397, + "endered": 30398, + "ĠLeone": 30399, + "avers": 30400, + "Ġmortgages": 30401, + "Apparently": 30402, + "Nic": 30403, + "haus": 30404, + "ĠThousands": 30405, + "auld": 30406, + "Ġmash": 30407, + "shoot": 30408, + "Ġdiarr": 30409, + "Ġconsciously": 30410, + "Hero": 30411, + "eas": 30412, + "ĠNaturally": 30413, + "ĠDestroyer": 30414, + "Ġdashboard": 30415, + "services": 30416, + "Rog": 30417, + "Ġmillennials": 30418, + "Ġinvade": 30419, + "-(": 30420, + "Ġcommissions": 30421, + "ĠAuckland": 30422, + "Ġbroadcasts": 30423, + "Ġfrontal": 30424, + "Ġcrank": 30425, + "ĠHistoric": 30426, + "Ġrumours": 30427, + "CTV": 30428, + "Ġsteril": 30429, + "Ġbooster": 30430, + "rocket": 30431, + "ãĤ¼": 30432, + "utsche": 30433, + "ĠPI": 30434, + "Ġ233": 30435, + "ĠProducer": 30436, + "ĠAnalytics": 30437, + "Ġinvaluable": 30438, + "Ġunintention": 30439, + "ĠCY": 30440, + "Ġscrutin": 30441, + "Ġgigg": 30442, + "Ġengulf": 30443, + "Ġproletariat": 30444, + "Ġhacks": 30445, + "ĠHew": 30446, + "arak": 30447, + "ĠSlime": 30448, + "ielding": 30449, + "agher": 30450, + "ĠElliot": 30451, + "Ġtelecom": 30452, + "Ġ219": 30453, + "ultan": 30454, + "ĠArbor": 30455, + "ĠScouts": 30456, + "Ban": 30457, + "Ġlifespan": 30458, + "Ġblasp": 30459, + "388": 30460, + "Ġjudiciary": 30461, + "ĠContinental": 30462, + "asking": 30463, + "McC": 30464, + "LED": 30465, + "Ġbaggage": 30466, + "ĠSorcerer": 30467, + "Ġremnants": 30468, + "ĠGriffith": 30469, + "etsu": 30470, + "ĠSubaru": 30471, + "ĠPersonality": 30472, + "designed": 30473, + "ushima": 30474, + "agnar": 30475, + "Ġrecoil": 30476, + "Ġpassions": 30477, + "\\\":": 30478, + "Ġtee": 30479, + 
"Ġabolition": 30480, + "ĠCreating": 30481, + "jac": 30482, + "Ġ194": 30483, + "019": 30484, + "Ġpillars": 30485, + "riched": 30486, + "/\"": 30487, + "tk": 30488, + "Ġlivelihood": 30489, + "Ġroasted": 30490, + "ahon": 30491, + "ĠHutch": 30492, + "assert": 30493, + "Ġdividend": 30494, + "Ġknit": 30495, + "Ġdaunting": 30496, + "Ġdisturbance": 30497, + "Ġshale": 30498, + "Ġcultivated": 30499, + "Ġrefrigerator": 30500, + "LB": 30501, + "ĠNET": 30502, + "Ġcommercials": 30503, + "Ġthinkers": 30504, + "455": 30505, + "Ġchop": 30506, + "Broad": 30507, + "Ġsuspicions": 30508, + "Ġtagged": 30509, + "lifting": 30510, + "Ġstylish": 30511, + "ĠShields": 30512, + "Shortly": 30513, + "Ġtails": 30514, + "Auth": 30515, + "STE": 30516, + "ĠGAME": 30517, + "Ġseism": 30518, + "ĠKis": 30519, + "ologne": 30520, + "Ġcowork": 30521, + "Ġforcibly": 30522, + "Ġthyroid": 30523, + "ĠPB": 30524, + "ANE": 30525, + "married": 30526, + "horse": 30527, + "Ġpolymer": 30528, + "ĠChal": 30529, + "odor": 30530, + "DEBUG": 30531, + "ĠContext": 30532, + "Ġbliss": 30533, + "Ġpinpoint": 30534, + "ĠMathemat": 30535, + "legram": 30536, + "ĠWeekend": 30537, + "Ġlabelled": 30538, + "Ġbart": 30539, + "itles": 30540, + "Ġestrogen": 30541, + "âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ": 30542, + "\"'": 30543, + "Ġvisibly": 30544, + "Ġoutsider": 30545, + "aida": 30546, + "Area": 30547, + "Ġdissemin": 30548, + "Ġdishonest": 30549, + "ĠClosed": 30550, + "ĠBulletin": 30551, + "ĠRamsey": 30552, + "sword": 30553, + "ĠXI": 30554, + "ourced": 30555, + "Same": 30556, + "346": 30557, + "ĠRepe": 30558, + "ĠKou": 30559, + "cake": 30560, + "emis": 30561, + "Cache": 30562, + "ĠMeaning": 30563, + "ĠEnlight": 30564, + "onomy": 30565, + "Ġmanifestation": 30566, + "sworth": 30567, + "Jay": 30568, + "Ġchore": 30569, + "ör": 30570, + "Dream": 30571, + "Ġsanctioned": 30572, + "Ġculturally": 30573, + "ĠAra": 30574, + "Nav": 30575, + "Ġtheological": 30576, + "Ġstrut": 30577, + "ĠVO": 30578, + "ĠHandbook": 30579, + "Ġconstructing": 30580, + "Ġ¶": 30581, + "ĠBenefits": 30582, + "ĠPsychological": 30583, + "sac": 30584, + "å¸": 30585, + "policy": 30586, + "ĠMatters": 30587, + "ĠReported": 30588, + "ĠByte": 30589, + "Ġvitro": 30590, + "ĠMaiden": 30591, + "Ġlam": 30592, + "ĠJennings": 30593, + "Ġgarment": 30594, + "ĠRutgers": 30595, + "ĠStafford": 30596, + "ĠWellington": 30597, + "Ġintermitt": 30598, + "Ġnpm": 30599, + "Ġordeal": 30600, + "Ġplugged": 30601, + "ooming": 30602, + "inished": 30603, + "framework": 30604, + "Ġtimber": 30605, + "Ġcass": 30606, + "Ġ850": 30607, + "iless": 30608, + "ĠRedux": 30609, + "768": 30610, + "Stre": 30611, + "Ġsurpassed": 30612, + "whel": 30613, + "Ġparallels": 30614, + "Ġveil": 30615, + "ĠGI": 30616, + "ĠREST": 30617, + "Ġreadiness": 30618, + "sort": 30619, + "Ġmodifying": 30620, + "ĠSlate": 30621, + "ruff": 30622, + "Ġmarble": 30623, + "Ġinfrared": 30624, + "Ġauditor": 30625, + "ĠFANTASY": 30626, + "ĠPoverty": 30627, + "ĠSPD": 30628, + "Ġ\"(": 30629, + "Ky": 30630, + "RAY": 30631, + "Ġexecutions": 30632, + "ĠBeverly": 30633, + "ĠMarxism": 30634, + "ĠBurst": 30635, + "ĠKali": 30636, + "estones": 30637, + "Clearly": 30638, + "Ell": 30639, + "ãģ§": 30640, + "ĠProceedings": 30641, + "Token": 30642, + "IFIC": 30643, + "ña": 30644, + "Central": 30645, + "ĠHaley": 30646, + "ĠDrama": 30647, + "Ġformations": 30648, + "ORN": 30649, + "Books": 30650, + "Ġdominating": 30651, + "ĠFlyers": 30652, + "ĠCompanion": 30653, + "Ġdisciplined": 30654, + "ĠYugoslav": 30655, + "ĠSpells": 30656, + "Ġvengeance": 30657, + "Ġlandlords": 30658, + 
"Len": 30659, + "ĠOgre": 30660, + "anoia": 30661, + "Ġpiercing": 30662, + "Ġcongreg": 30663, + "Ġscorer": 30664, + "obia": 30665, + "Ġnickel": 30666, + "ĠLearns": 30667, + "Ġrejo": 30668, + "Ġmasterpiece": 30669, + "Flash": 30670, + "Ġinhabited": 30671, + "ĠOpenGL": 30672, + "ĠDud": 30673, + "ĠICO": 30674, + "Ġarter": 30675, + "Ġplur": 30676, + "Ġmastery": 30677, + "Ġlongstanding": 30678, + "sted": 30679, + "Ġwines": 30680, + "Ġtelevised": 30681, + "ĠShrine": 30682, + "ĠBayern": 30683, + "Ġâĵĺ": 30684, + "Ġenclosure": 30685, + "john": 30686, + "Ġprophets": 30687, + "ĠResurrection": 30688, + "ĠOrders": 30689, + "Ġuneven": 30690, + "rals": 30691, + "Ġdwind": 30692, + "ĠLah": 30693, + "ĠSloven": 30694, + "378": 30695, + "Ġinsistence": 30696, + "affle": 30697, + "ĠClone": 30698, + "Ġhardship": 30699, + "ĠCongressman": 30700, + "Ġplead": 30701, + "Ġreviewers": 30702, + "Ġcured": 30703, + "Ġ1935": 30704, + "asley": 30705, + "fake": 30706, + "ĠThinking": 30707, + "ydia": 30708, + "PART": 30709, + "ĠDota": 30710, + "oit": 30711, + "Ġwhipped": 30712, + "Ġbouncing": 30713, + "ĠHispanics": 30714, + "comings": 30715, + "Ġcannabin": 30716, + "ĠChambers": 30717, + "ĠZack": 30718, + "Optional": 30719, + "Ġcoats": 30720, + "Ġprowess": 30721, + "ĠNorton": 30722, + "Ġplainly": 30723, + "Ġfreight": 30724, + "Ġinhibition": 30725, + "Ġclam": 30726, + "Ġ303": 30727, + "kef": 30728, + "aleigh": 30729, + "Luke": 30730, + "Ġpsycho": 30731, + "atorium": 30732, + "MED": 30733, + "Ġtreaties": 30734, + "Ġindisc": 30735, + "Ġdc": 30736, + "OPS": 30737, + "Ġresilient": 30738, + "ĠInterstate": 30739, + "Ġslack": 30740, + "Ġmundane": 30741, + "Ġestablishes": 30742, + "359": 30743, + "Ġstrained": 30744, + "Ġnond": 30745, + "Sus": 30746, + "Ġcaste": 30747, + "arate": 30748, + "ieving": 30749, + "Ġunfairly": 30750, + "Ġparser": 30751, + "onial": 30752, + "ursive": 30753, + "Via": 30754, + "ĠOtto": 30755, + "ĠAuthorities": 30756, + "stroke": 30757, + "KR": 30758, + "ĠMercy": 30759, + "Ġfurnished": 30760, + "Ġoutset": 30761, + "Ġmetic": 30762, + "1982": 30763, + "olithic": 30764, + "ĠTent": 30765, + "ogical": 30766, + "ĠAircraft": 30767, + "Ġhides": 30768, + "ĠBecame": 30769, + "Ġeducators": 30770, + "reaching": 30771, + "Ġvolatility": 30772, + "Ġtoddler": 30773, + "ĠNASCAR": 30774, + "ĠTwelve": 30775, + "ĠHighlights": 30776, + "Ġgrape": 30777, + "Ġsplits": 30778, + "Ġpeasant": 30779, + "Ġreneg": 30780, + "ĠMSI": 30781, + "Temp": 30782, + "stars": 30783, + "Ġtrek": 30784, + "ĠHyde": 30785, + "binding": 30786, + "Ġrealism": 30787, + "Ġoxide": 30788, + "ĠHos": 30789, + "Ġmounts": 30790, + "Ġbiting": 30791, + "Ġcollapsing": 30792, + "Ġpostal": 30793, + "Ġmuseums": 30794, + "Ġdetached": 30795, + "Ġrespecting": 30796, + "Ġmonopol": 30797, + "Ġworkflow": 30798, + "ĠCake": 30799, + "Template": 30800, + "ĠOrganisation": 30801, + "Ġpersistence": 30802, + "369": 30803, + "Coming": 30804, + "Brad": 30805, + "Ġredundant": 30806, + "ĠGTA": 30807, + "Ġbending": 30808, + "Ġrevoked": 30809, + "Ġoffending": 30810, + "Ġframing": 30811, + "Ġprintf": 30812, + "Commun": 30813, + "members": 30814, + "Outside": 30815, + "Ġconstrued": 30816, + "Ġcoded": 30817, + "FORE": 30818, + "Ġchast": 30819, + "Chat": 30820, + "Indian": 30821, + "ĠYard": 30822, + "?!\"": 30823, + "ĠPorts": 30824, + "ĠXavier": 30825, + "ĠRET": 30826, + "'.\"": 30827, + "ĠBoat": 30828, + "ivated": 30829, + "icht": 30830, + "umerable": 30831, + "Ds": 30832, + "ĠDunn": 30833, + "Ġcoffin": 30834, + "Ġsecurely": 30835, + "ĠRaptors": 30836, + "ĠBes": 30837, + "Installation": 30838, + 
"Ġinception": 30839, + "ĠHealthy": 30840, + "endants": 30841, + "Ġpsychologists": 30842, + "ĠSheikh": 30843, + "cultural": 30844, + "ĠBlackBerry": 30845, + "shift": 30846, + "Fred": 30847, + "oche": 30848, + "Ġcakes": 30849, + "ĠSEO": 30850, + "ĠGian": 30851, + "ĠAsians": 30852, + "ogging": 30853, + "element": 30854, + "Ġpundits": 30855, + "ĠVaugh": 30856, + "ĠGavin": 30857, + "Ġhitter": 30858, + "Ġdrowned": 30859, + "Ġchalk": 30860, + "ĠZika": 30861, + "Ġmeasles": 30862, + "802": 30863, + "âĢ¦..": 30864, + "ĠAWS": 30865, + "]\"": 30866, + "Ġdistort": 30867, + "ĠMast": 30868, + "Ġantibodies": 30869, + "ĠMash": 30870, + "Memory": 30871, + "ĠUganda": 30872, + "ĠProb": 30873, + "Ġvomiting": 30874, + "ĠTurns": 30875, + "Ġoccupying": 30876, + "Ġevasion": 30877, + "ĠTherapy": 30878, + "Ġpromo": 30879, + "Ġelectr": 30880, + "Ġblueprint": 30881, + "ĠDre": 30882, + "priced": 30883, + "ĠDepot": 30884, + "Ġalleviate": 30885, + "ĠSomali": 30886, + "marg": 30887, + "nine": 30888, + "Ġnostalgia": 30889, + "ĠShepherd": 30890, + "Ġcavalry": 30891, + "Ġtorped": 30892, + "ĠBloody": 30893, + "xb": 30894, + "Ġsank": 30895, + "Ġgoalt": 30896, + "reportprint": 30897, + "embedreportprint": 30898, + "cloneembedreportprint": 30899, + "ĠInitially": 30900, + "ĠFischer": 30901, + "Ġnoteworthy": 30902, + "cern": 30903, + "Ġinefficient": 30904, + "rawdownload": 30905, + "rawdownloadcloneembedreportprint": 30906, + "cation": 30907, + "ĠDynasty": 30908, + "lag": 30909, + "DES": 30910, + "Ġdistinctly": 30911, + "ĠEstonia": 30912, + "Ġopenness": 30913, + "Ġgossip": 30914, + "ruck": 30915, + "Width": 30916, + "ĠIbrahim": 30917, + "Ġpetroleum": 30918, + "Ġavatar": 30919, + "ĠHed": 30920, + "atha": 30921, + "ĠHogwarts": 30922, + "Ġcaves": 30923, + "678": 30924, + "Ġsafeguard": 30925, + "ĠMog": 30926, + "isson": 30927, + "ĠDurham": 30928, + "slaught": 30929, + "ĠGraduate": 30930, + "Ġsubconscious": 30931, + "ĠExcellent": 30932, + "ĠDum": 30933, + "-----": 30934, + "Ġpiles": 30935, + "ĠWORK": 30936, + "ĠGarn": 30937, + "ĠFol": 30938, + "ĠATM": 30939, + "Ġavoids": 30940, + "ĠTul": 30941, + "Ġbleak": 30942, + "ELY": 30943, + "ivist": 30944, + "lightly": 30945, + "Pers": 30946, + "ĠDob": 30947, + "ĠLS": 30948, + "Ġinsanity": 30949, + "ε": 30950, + "atalie": 30951, + "Enlarge": 30952, + "Ġtwists": 30953, + "Ġfaulty": 30954, + "Ġpiracy": 30955, + "Ġimpover": 30956, + "Ġrugged": 30957, + "ĠFashion": 30958, + "Ġsands": 30959, + "'?": 30960, + "swick": 30961, + "Ġnatives": 30962, + "Ġhen": 30963, + "ĠNoise": 30964, + "ãĥĹ": 30965, + "Ġgreens": 30966, + "Ġfreezer": 30967, + "Ġdynasty": 30968, + "ĠFathers": 30969, + "ĠNewark": 30970, + "Ġarchaeological": 30971, + "Ġot": 30972, + "obar": 30973, + "Ġblockade": 30974, + "Ġallerg": 30975, + "LV": 30976, + "Ġdebit": 30977, + "ĠRFC": 30978, + "ĠMilton": 30979, + "ĠPressure": 30980, + "Ġwillingly": 30981, + "Ġdisproportionate": 30982, + "Ġoppressive": 30983, + "Ġdiamonds": 30984, + "Ġbelongings": 30985, + "1970": 30986, + "Ġbells": 30987, + "Ġimperialism": 30988, + "Ġ227": 30989, + "Ġexploding": 30990, + "ĠEclipse": 30991, + "Ġ1919": 30992, + "Ġrant": 30993, + "Ġnominations": 30994, + "347": 30995, + "Ġpeacefully": 30996, + "rica": 30997, + "ĠFUCK": 30998, + "Ġvibration": 30999, + "malink": 31000, + "Ġropes": 31001, + "ĠIvanka": 31002, + "ĠBrewery": 31003, + "ĠBooker": 31004, + "ĠOwens": 31005, + "goers": 31006, + "Services": 31007, + "ĠSnape": 31008, + "Ġ191": 31009, + "395": 31010, + "Ġ299": 31011, + "justice": 31012, + "Ġbri": 31013, + "Ġdiscs": 31014, + "Ġprominently": 31015, + "Ġvulgar": 
31016, + "Ġskipping": 31017, + "lves": 31018, + "Ġtsunami": 31019, + "374": 31020, + "ĠUrug": 31021, + "ĠEid": 31022, + "recated": 31023, + "phen": 31024, + "Ġfaults": 31025, + "ĠStarted": 31026, + "950": 31027, + "Ġpi": 31028, + "Ġdetector": 31029, + "Ġbastard": 31030, + "Ġvalidated": 31031, + "SpaceEngineers": 31032, + "OURCE": 31033, + "Ġ(~": 31034, + "Ġunsur": 31035, + "Ġaffirmed": 31036, + "Ġfascism": 31037, + "Ġresolving": 31038, + "ĠChavez": 31039, + "ĠCyn": 31040, + "Ġdetract": 31041, + "Lost": 31042, + "Ġrigged": 31043, + "Ġhomage": 31044, + "ĠBruno": 31045, + "555": 31046, + "eca": 31047, + "Ġpresses": 31048, + "Ġhumour": 31049, + "Ġspacing": 31050, + "Ġ'/": 31051, + "olkien": 31052, + "Coun": 31053, + "OPER": 31054, + "Tre": 31055, + "Son": 31056, + "ĠCambodia": 31057, + "ierre": 31058, + "mong": 31059, + "ozy": 31060, + "Ġliquidity": 31061, + "ĠSoviets": 31062, + "ĠFernando": 31063, + "Ġ229": 31064, + "Ġslug": 31065, + "ĠCatalan": 31066, + "electric": 31067, + "Ġscenery": 31068, + "ĠHearth": 31069, + "Ġconstrained": 31070, + "Ġgoalie": 31071, + "ĠGuidelines": 31072, + "ĠAmmo": 31073, + "ĠPearson": 31074, + "Ġtaxed": 31075, + "Ġfetus": 31076, + "Response": 31077, + "ĠAlexis": 31078, + "thia": 31079, + "Guy": 31080, + "Ġreconstruct": 31081, + "Ġextremes": 31082, + "Ġconcluding": 31083, + "ĠPeg": 31084, + "ooks": 31085, + "Ġdeductions": 31086, + "Rose": 31087, + "Ġgroundbreaking": 31088, + "ĠTarg": 31089, + "ãĥģ": 31090, + "ĠReve": 31091, + "resource": 31092, + "Ġmoons": 31093, + "Ġelectromagnetic": 31094, + "Ġamidst": 31095, + "ĠViktor": 31096, + "NESS": 31097, + "BACK": 31098, + "Ġcommute": 31099, + "ĠAnaheim": 31100, + "Ġfluctuations": 31101, + "640": 31102, + "Ġnoodles": 31103, + "ĠCopenhagen": 31104, + "ĠTide": 31105, + "ĠGrizz": 31106, + "ĠSEE": 31107, + "Ġpipelines": 31108, + "Ġscars": 31109, + "endo": 31110, + "agus": 31111, + "ĠETF": 31112, + "/#": 31113, + "ĠBecome": 31114, + "448": 31115, + "Ġvisc": 31116, + "ĠRecommended": 31117, + "Ġjumper": 31118, + "Ġcognition": 31119, + "Ġassassin": 31120, + "Ġwitnessing": 31121, + "ĠSetup": 31122, + "Ġlac": 31123, + "vim": 31124, + "ISM": 31125, + "pages": 31126, + "SSL": 31127, + "358": 31128, + "Ġadject": 31129, + "industrial": 31130, + "lore": 31131, + "chery": 31132, + "Ġglitter": 31133, + "Ġcalf": 31134, + "Florida": 31135, + "Ġspoilers": 31136, + "Ġsucceeds": 31137, + "Ġchanting": 31138, + "Ġslogans": 31139, + "ĠTracy": 31140, + "Visit": 31141, + "rology": 31142, + "Ġmornings": 31143, + "Ġlineage": 31144, + "Ġsip": 31145, + "Ġintensely": 31146, + "Ġflourish": 31147, + "ĠSleeping": 31148, + "ĠFem": 31149, + "orpor": 31150, + "ĠKlan": 31151, + "ĠDarth": 31152, + "hack": 31153, + "ĠNielsen": 31154, + "Ġtumors": 31155, + "Ġprocurement": 31156, + "ĠYorkshire": 31157, + "Ġraided": 31158, + "KY": 31159, + "Anna": 31160, + "Ġ//[": 31161, + "ĠDisorder": 31162, + "ĠMustang": 31163, + "ĠWen": 31164, + "ĠTrying": 31165, + "sq": 31166, + "Ġdeliveries": 31167, + "Ġshutter": 31168, + "Ġcerebral": 31169, + "Ġbipolar": 31170, + "ĠCN": 31171, + "lass": 31172, + "jet": 31173, + "Ġdebating": 31174, + ">:": 31175, + "Ġeagle": 31176, + "grades": 31177, + "ĠDixon": 31178, + "UGC": 31179, + "MAS": 31180, + "ĠDraco": 31181, + "ĠMachines": 31182, + "affer": 31183, + "Ġeman": 31184, + "²": 31185, + "pron": 31186, + "ĠGym": 31187, + "Ġcomparatively": 31188, + "ĠTribunal": 31189, + "PRO": 31190, + "Ġlex": 31191, + "Ġfertile": 31192, + "Ġdepressing": 31193, + "Ġsuperficial": 31194, + "essential": 31195, + "ĠHunters": 31196, + "gp": 31197, + 
"Ġprominence": 31198, + "Liber": 31199, + "ĠAncest": 31200, + "otechnology": 31201, + "Ġmocking": 31202, + "ĠTraff": 31203, + "ĸļ": 31204, + "Medium": 31205, + "Iraq": 31206, + "Ġpsychiatrist": 31207, + "Quantity": 31208, + "ĠLect": 31209, + "Ġnoisy": 31210, + "520": 31211, + "GY": 31212, + "Ġslapped": 31213, + "ĠMTV": 31214, + "Ġpara": 31215, + "pull": 31216, + "Multiple": 31217, + "asher": 31218, + "Ġnour": 31219, + "ĠSeg": 31220, + "Spell": 31221, + "vous": 31222, + "ordial": 31223, + "Senior": 31224, + "ĠGoldberg": 31225, + "ĠPlasma": 31226, + "need": 31227, + "Ġmessenger": 31228, + "eret": 31229, + "Ġteamed": 31230, + "Ġliteracy": 31231, + "ĠLeah": 31232, + "ĠDoyle": 31233, + "Ġemitted": 31234, + "UX": 31235, + "Ġevade": 31236, + "Ġmaze": 31237, + "Ġwrongly": 31238, + "ĠLars": 31239, + "Ġstereotype": 31240, + "Ġpledges": 31241, + "Ġaroma": 31242, + "ĠMET": 31243, + "Ġacre": 31244, + "ĠOD": 31245, + "Ġff": 31246, + "Ġbreweries": 31247, + "ĠHilton": 31248, + "undle": 31249, + "ĠKak": 31250, + "ĠThankfully": 31251, + "ĠCanucks": 31252, + "inctions": 31253, + "ĠAppears": 31254, + "Ġcoer": 31255, + "Ġundermined": 31256, + "rovers": 31257, + "Andre": 31258, + "Ġblaze": 31259, + "umers": 31260, + "Ġfamine": 31261, + "amphetamine": 31262, + "ulkan": 31263, + "Amount": 31264, + "Ġdesperation": 31265, + "wikipedia": 31266, + "development": 31267, + "ĠCorinth": 31268, + "ussia": 31269, + "Jackson": 31270, + "LI": 31271, + "Native": 31272, + "Rs": 31273, + "Ohio": 31274, + "ĠKathleen": 31275, + "Fortunately": 31276, + "Ġattendant": 31277, + "ĠPreferred": 31278, + "ĠDidn": 31279, + "ĠVs": 31280, + "Mis": 31281, + "Ġrespondent": 31282, + "Ġboun": 31283, + "stable": 31284, + "Ġpaved": 31285, + "Ġunexpl": 31286, + "ĠCheney": 31287, + "LM": 31288, + "ĠCull": 31289, + "blown": 31290, + "Ġconfronting": 31291, + "ocese": 31292, + "serving": 31293, + "Wi": 31294, + "ĠLithuania": 31295, + "anni": 31296, + "Ġstalk": 31297, + "hd": 31298, + "Ġvener": 31299, + "APH": 31300, + "ynchronous": 31301, + "URR": 31302, + "umably": 31303, + "historic": 31304, + "Half": 31305, + "Hay": 31306, + "Ġresilience": 31307, + "spection": 31308, + "Ġabandoning": 31309, + "Obs": 31310, + "ĠDebbie": 31311, + "Ġgradient": 31312, + "ĠPlaint": 31313, + "ĠCanal": 31314, + "ARCH": 31315, + "Ġexpansive": 31316, + "Ġfung": 31317, + "Ġbounced": 31318, + "Und": 31319, + "Ġprecautions": 31320, + "Ġclarification": 31321, + "Ġdagger": 31322, + "Ġgrips": 31323, + "Ġµ": 31324, + "ĠRivera": 31325, + "ĠUndead": 31326, + "isites": 31327, + "ĠFIRST": 31328, + "ño": 31329, + "audi": 31330, + "Ġhostages": 31331, + "Ġcompliant": 31332, + "Ġalumni": 31333, + "Seven": 31334, + "Ġcybersecurity": 31335, + "either": 31336, + "Collect": 31337, + "Ġinvariably": 31338, + "ĠSoci": 31339, + "Ġlawmaker": 31340, + "Ġale": 31341, + "ĠPersonally": 31342, + "Nazi": 31343, + "Ġcustomization": 31344, + "ĠProc": 31345, + "ĠSaskatchewan": 31346, + "eaturing": 31347, + "Ġspared": 31348, + "Ġdiscontinued": 31349, + "Ġcomputational": 31350, + "ĠMotorola": 31351, + "Ġsupremacist": 31352, + "governmental": 31353, + "Ġparadise": 31354, + "ĠDowning": 31355, + "ĠNikon": 31356, + "Ġcatalyst": 31357, + "berra": 31358, + "Toronto": 31359, + "875": 31360, + "beta": 31361, + "ĠMacron": 31362, + "Ġunrealistic": 31363, + "vector": 31364, + "ĠVehicles": 31365, + "itiveness": 31366, + "ĠRV": 31367, + "ĠColbert": 31368, + "sin": 31369, + "oji": 31370, + "entin": 31371, + "ĠKrish": 31372, + "hello": 31373, + "ffield": 31374, + "oky": 31375, + "ĠTate": 31376, + "Ġmaple": 31377, + 
"Ġaids": 31378, + "chemical": 31379, + "334": 31380, + "nuts": 31381, + "ĠWarp": 31382, + "Ġxx": 31383, + "ĠRobb": 31384, + "umerous": 31385, + "_-_": 31386, + "ftime": 31387, + "ĠVW": 31388, + "Ġwinger": 31389, + "ĠDome": 31390, + "tools": 31391, + "ĠPV": 31392, + "ĠGeorgetown": 31393, + "Ġgeared": 31394, + "Ġjihadists": 31395, + "Ġcp": 31396, + "Ġsteroids": 31397, + "Mother": 31398, + "clerosis": 31399, + "ĠDRM": 31400, + "nesia": 31401, + "Ġlinger": 31402, + "Ġimmersive": 31403, + "ĠCOUN": 31404, + "Ġoutweigh": 31405, + "ensual": 31406, + "Band": 31407, + "Ġtransforms": 31408, + "matched": 31409, + "psons": 31410, + "ĠJudicial": 31411, + "factor": 31412, + "Ġreferral": 31413, + "Ġoddly": 31414, + "ĠWenger": 31415, + "Bring": 31416, + "ĠBows": 31417, + "602": 31418, + "ICLE": 31419, + "Ġlions": 31420, + "ĠAcademic": 31421, + "ĠThorn": 31422, + "ĠRaider": 31423, + "kefeller": 31424, + "Storage": 31425, + "Lower": 31426, + "ĠOrt": 31427, + "ĠEquality": 31428, + "ALT": 31429, + "ĠSOC": 31430, + "Types": 31431, + "Ġlyn": 31432, + "ĠAsset": 31433, + "coat": 31434, + "TPP": 31435, + "CVE": 31436, + "ĠPioneer": 31437, + "application": 31438, + "Modern": 31439, + "ĠHK": 31440, + "Environment": 31441, + "Alright": 31442, + "Rain": 31443, + "IPP": 31444, + "ĠShiite": 31445, + "Ġmound": 31446, + "ĠAbilities": 31447, + "condition": 31448, + "Staff": 31449, + "Ġcompetence": 31450, + "ĠMoor": 31451, + "ĠDiablo": 31452, + "Ġwithheld": 31453, + "Ġostensibly": 31454, + "ĠBrom": 31455, + "Ġmsg": 31456, + "Ġdenomin": 31457, + "ĠReferences": 31458, + "ĠFP": 31459, + "Ġplunged": 31460, + "Ġpamph": 31461, + "moving": 31462, + "central": 31463, + "Ġdownright": 31464, + "Ġfading": 31465, + "Tal": 31466, + "Typ": 31467, + "ĠThy": 31468, + "ukes": 31469, + "ithe": 31470, + "Ġove": 31471, + "Ġbattled": 31472, + "Ġseafood": 31473, + "Ġfigur": 31474, + "ĠRD": 31475, + "crop": 31476, + "Ġsquads": 31477, + "{\\": 31478, + "à¹": 31479, + "ĠEh": 31480, + "Ġinterviewing": 31481, + "ĠQin": 31482, + "Ġaspiring": 31483, + "PLIC": 31484, + "Ġclauses": 31485, + "ĠGast": 31486, + "ĠNir": 31487, + "Ġluggage": 31488, + "Ġhose": 31489, + "Ġsystemd": 31490, + "Ġdescending": 31491, + "ĠRevised": 31492, + "ĠRails": 31493, + "align": 31494, + "709": 31495, + "337": 31496, + "Ġfug": 31497, + "charging": 31498, + "tags": 31499, + "Ġuter": 31500, + "kish": 31501, + "WARNING": 31502, + "490": 31503, + "profits": 31504, + "Ġvoyage": 31505, + "Ġace": 31506, + "ĠVanguard": 31507, + "ĠTanks": 31508, + "ĠMuk": 31509, + "Ġ226": 31510, + "Safe": 31511, + "Armor": 31512, + "Ġvolcanic": 31513, + "Ġwomb": 31514, + "ĠMIL": 31515, + "Ġbeginner": 31516, + "ĠRecogn": 31517, + "ĠAAP": 31518, + "PLAY": 31519, + ")!": 31520, + "Ġdetecting": 31521, + "cn": 31522, + "Ġbreaches": 31523, + "Basically": 31524, + "ĠPag": 31525, + "ĠMunicipal": 31526, + "ĠIndie": 31527, + "ĠLaf": 31528, + "ĠDisable": 31529, + "ĠOlson": 31530, + "Ġrestrained": 31531, + "Ġrulings": 31532, + "Ġhumane": 31533, + "events": 31534, + "ĠCinema": 31535, + "displayText": 31536, + "ĠHatch": 31537, + "actionDate": 31538, + "onnaissance": 31539, + "Ġassaulting": 31540, + "ĠLug": 31541, + "CHAT": 31542, + "Ġvigorous": 31543, + "ĠPerse": 31544, + "Ġintolerance": 31545, + "ĠSnapchat": 31546, + "ĠSharks": 31547, + "Ġdummy": 31548, + "ĠDiagn": 31549, + "ĠGuitar": 31550, + "imeters": 31551, + "403": 31552, + "REG": 31553, + "Ax": 31554, + "Ġseparates": 31555, + "ĠMahm": 31556, + "Ġtv": 31557, + "jah": 31558, + "OOL": 31559, + "Circ": 31560, + "ĠWindsor": 31561, + "ussian": 31562, + "Ġintuition": 
31563, + "Ġdisdain": 31564, + "ĠDonovan": 31565, + "Ġ221": 31566, + "Emb": 31567, + "Ġcondemning": 31568, + "Ġgenerosity": 31569, + "zzy": 31570, + "Ġpanties": 31571, + "ĠPrevent": 31572, + "ActionCode": 31573, + "ANA": 31574, + "342": 31575, + "externalActionCode": 31576, + "Ġspecifying": 31577, + "Ġcrystall": 31578, + "Jere": 31579, + "Ġrupt": 31580, + "ĠApprentice": 31581, + "Ġprofiling": 31582, + "к": 31583, + "Strike": 31584, + "Ġsideline": 31585, + "Ġobligated": 31586, + "Ġoccult": 31587, + "Ġbureaucratic": 31588, + "antically": 31589, + "rupted": 31590, + "negative": 31591, + "ĠEthiopia": 31592, + "ĠCivic": 31593, + "Ġinsiders": 31594, + "eligible": 31595, + "ĠTVs": 31596, + "ĠBAR": 31597, + "ĠTI": 31598, + "iologist": 31599, + "ĠAIR": 31600, + "Ġsubstituted": 31601, + "Arab": 31602, + "ĠSaul": 31603, + "ĠYog": 31604, + "prem": 31605, + "Ġbuilders": 31606, + "Ġstationary": 31607, + "Ġdoubtful": 31608, + "Ġvigorously": 31609, + "Ġthrilling": 31610, + "Physical": 31611, + "ĠCarey": 31612, + "ĠHydra": 31613, + "geoning": 31614, + "ĠSly": 31615, + "yton": 31616, + "Ġborrowers": 31617, + "ĠParkinson": 31618, + "Ġë": 31619, + "ĠJamaica": 31620, + "Ġsatir": 31621, + "Ġinsurgents": 31622, + "ĠFirm": 31623, + "Ġisot": 31624, + "ĠKarn": 31625, + "ourning": 31626, + "akens": 31627, + "docs": 31628, + "little": 31629, + "ĠMonaco": 31630, + "CLASS": 31631, + "Turkey": 31632, + "Ly": 31633, + "ĠConan": 31634, + "assic": 31635, + "Ġstarred": 31636, + "ĠPacers": 31637, + "eties": 31638, + "Ġtipping": 31639, + "Moon": 31640, + "ĠRw": 31641, + "same": 31642, + "Ġcavity": 31643, + "Ġgoof": 31644, + "ĠZo": 31645, + "Shock": 31646, + "ummer": 31647, + "Ġemphasizes": 31648, + "Ġregrett": 31649, + "Ġnovelty": 31650, + "Ġenvy": 31651, + "ĠPassive": 31652, + "rw": 31653, + "505": 31654, + "Ġindifferent": 31655, + "ĠRica": 31656, + "ĠHimself": 31657, + "ĠFreddie": 31658, + "Ġadip": 31659, + "ä¸Ģ": 31660, + "Ġbreakout": 31661, + "Ġhurried": 31662, + "ĠHuang": 31663, + "ĠDisk": 31664, + "Ġroaming": 31665, + "?????-?????-": 31666, + "UV": 31667, + "ĠRicky": 31668, + "ĠSigma": 31669, + "Ġmarginalized": 31670, + "Ġedits": 31671, + "Ġ304": 31672, + "memory": 31673, + "Ġspecimen": 31674, + "293": 31675, + "ãģ¯": 31676, + "Ġvertically": 31677, + "Ġaudition": 31678, + "ĠHeck": 31679, + "Ġcaster": 31680, + "ĠHoldings": 31681, + "adal": 31682, + "ĠCron": 31683, + "ĠLiam": 31684, + "Ġdeflect": 31685, + "Pick": 31686, + "ĠDebug": 31687, + "REF": 31688, + "Ġversatility": 31689, + "othes": 31690, + "classified": 31691, + "ĠMahar": 31692, + "ĠHort": 31693, + "Counter": 31694, + "stasy": 31695, + "noticed": 31696, + "331": 31697, + "ĠShim": 31698, + "fuck": 31699, + "ĠBie": 31700, + "Ġairing": 31701, + "ĠProtein": 31702, + "ĠHolding": 31703, + "Ġspectators": 31704, + "iliated": 31705, + "ĠThatcher": 31706, + "nosis": 31707, + "ãĥ¼ãĥ³": 31708, + "Tele": 31709, + "Boston": 31710, + "ĠTempl": 31711, + "stay": 31712, + "Ġdeclarations": 31713, + "479": 31714, + "Volume": 31715, + "ĠDesigner": 31716, + "ĠOverwatch": 31717, + "idae": 31718, + "Ġonwards": 31719, + "Ġnets": 31720, + "ĠManila": 31721, + "particularly": 31722, + "Ġpolitic": 31723, + "oother": 31724, + "Ġportraits": 31725, + "Ġpavement": 31726, + "cffff": 31727, + "Ġsaints": 31728, + "Ġbeginners": 31729, + "ESPN": 31730, + "Ġshortcomings": 31731, + "âķIJâķIJ": 31732, + "Ġcomet": 31733, + "ĠOrganic": 31734, + "quel": 31735, + "Ġhospitalized": 31736, + "Break": 31737, + "Ġpeel": 31738, + "dylib": 31739, + "aspx": 31740, + "urances": 31741, + "ĠTIM": 31742, + "Pg": 31743, 
+ "Ġreadable": 31744, + "ĠMalik": 31745, + "Ġmuzzle": 31746, + "Ġbenchmarks": 31747, + "dal": 31748, + "ĠVacc": 31749, + "ĠHicks": 31750, + "609": 31751, + "ĠBiblical": 31752, + "heng": 31753, + "Ġoverload": 31754, + "ĠCivilization": 31755, + "Ġimmoral": 31756, + "Ġfries": 31757, + "ãĤĴ": 31758, + "Ġreproduced": 31759, + "Ġformulation": 31760, + "jug": 31761, + "irez": 31762, + "gear": 31763, + "Ġcoached": 31764, + "MpServer": 31765, + "ĠSJ": 31766, + "ĠKw": 31767, + "Init": 31768, + "deal": 31769, + "ĠOro": 31770, + "ĠLoki": 31771, + "ĠSongs": 31772, + "Ġ232": 31773, + "ĠLouise": 31774, + "asionally": 31775, + "Ġuncond": 31776, + "ollywood": 31777, + "Ġprogressives": 31778, + "ĠEnough": 31779, + "ĠDoe": 31780, + "Ġwreckage": 31781, + "Ġbrushed": 31782, + "ĠBaseType": 31783, + "Ġzoning": 31784, + "ishable": 31785, + "hetically": 31786, + "ĠCaucus": 31787, + "ĠHue": 31788, + "Ġkarma": 31789, + "ĠSporting": 31790, + "Ġtrader": 31791, + "Ġseeming": 31792, + "ĠCapture": 31793, + "430": 31794, + "bish": 31795, + "Ġtunes": 31796, + "Ġindoors": 31797, + "ĠSphere": 31798, + "ĠDancing": 31799, + "TERN": 31800, + "Ġnob": 31801, + "ĠGST": 31802, + "maps": 31803, + "Ġpeppers": 31804, + "Fit": 31805, + "Ġoversees": 31806, + "ĠRabbi": 31807, + "ĠRuler": 31808, + "vertising": 31809, + "office": 31810, + "xxx": 31811, + "Ġraft": 31812, + "Changed": 31813, + "Ġtextbooks": 31814, + "Links": 31815, + "ĠOmn": 31816, + "ãĢij": 31817, + "Ġinconvenience": 31818, + "ĠDonetsk": 31819, + "=~": 31820, + "Ġimplicitly": 31821, + "Ġboosts": 31822, + "ĠBones": 31823, + "ĠBoom": 31824, + "Courtesy": 31825, + "Ġsensational": 31826, + "ANY": 31827, + "Ġgreedy": 31828, + "eden": 31829, + "Ġinexper": 31830, + "ĠLer": 31831, + "ĠVale": 31832, + "Ġtighten": 31833, + "ĠEAR": 31834, + "ĠNum": 31835, + "Ġancestor": 31836, + "Sent": 31837, + "ĠHorde": 31838, + "urgical": 31839, + "allah": 31840, + "Ġsap": 31841, + "amba": 31842, + "ĠSpread": 31843, + "twitch": 31844, + "Ġgrandson": 31845, + "Ġfracture": 31846, + "Ġmoderator": 31847, + "ĠSeventh": 31848, + "ĠReverse": 31849, + "Ġestimation": 31850, + "Choose": 31851, + "Ġparach": 31852, + "Ġbarric": 31853, + "ãĢIJ": 31854, + "Ġcompass": 31855, + "Ġallergic": 31856, + "âĢķ": 31857, + "OTHER": 31858, + "errilla": 31859, + "Ġwagon": 31860, + "Ġzinc": 31861, + "Ġrubbed": 31862, + "ĠFuller": 31863, + "ĠLuxembourg": 31864, + "ĠHoover": 31865, + "Ġliar": 31866, + "ĠEvening": 31867, + "ĠCobb": 31868, + "esteem": 31869, + "Ġselector": 31870, + "ĠBrawl": 31871, + "isance": 31872, + "ĠEk": 31873, + "Ġtroop": 31874, + "Ġguts": 31875, + "ĠAppeal": 31876, + "ĠTibetan": 31877, + "Ġroutines": 31878, + "ĠMent": 31879, + "Ġsummarized": 31880, + "steamapps": 31881, + "Ġtranqu": 31882, + "Ġ1929": 31883, + "oran": 31884, + "ĠAuthent": 31885, + "Ġgmaxwell": 31886, + "Ġapprehens": 31887, + "Ġpoems": 31888, + "Ġsausage": 31889, + "ĠWebster": 31890, + "urus": 31891, + "Ġthemed": 31892, + "Ġlounge": 31893, + "Ġcharger": 31894, + "Spoiler": 31895, + "Ġspilled": 31896, + "hog": 31897, + "ĠSunder": 31898, + "ĠAin": 31899, + "ĠAngry": 31900, + "Ġdisqual": 31901, + "ĠFrequency": 31902, + "ĠEthernet": 31903, + "Ġhelper": 31904, + "Percent": 31905, + "Ġhorrifying": 31906, + "Ġail": 31907, + "ĠAllan": 31908, + "EEE": 31909, + "ĠCrossing": 31910, + "449": 31911, + "Ġholog": 31912, + "ĠPuzzles": 31913, + "ĠGoes": 31914, + "erenn": 31915, + "604": 31916, + "ãģı": 31917, + "ĠRafael": 31918, + "Ġatten": 31919, + "ĠEmanuel": 31920, + "Ġupro": 31921, + "ĠSusp": 31922, + "Psych": 31923, + "ĠTrainer": 31924, + "ĠNES": 
31925, + "ĠHunts": 31926, + "becue": 31927, + "Ġcounselor": 31928, + "Rule": 31929, + "Ġtoxins": 31930, + "Ġbanners": 31931, + "rifice": 31932, + "Ġgreeting": 31933, + "Ġfrenzy": 31934, + "Ġallocate": 31935, + "Ġ*)": 31936, + "expr": 31937, + "503": 31938, + "ĠChick": 31939, + "ĠTorn": 31940, + "Ġconsolidation": 31941, + "ĠFletcher": 31942, + "switch": 31943, + "frac": 31944, + "clips": 31945, + "ĠMcKin": 31946, + "ĠLunar": 31947, + "Month": 31948, + "ITCH": 31949, + "Ġscholarly": 31950, + "raped": 31951, + "398": 31952, + "Ġ1910": 31953, + "Ġegreg": 31954, + "Ġinsecure": 31955, + "Ġvictorious": 31956, + "cffffcc": 31957, + "Ġsingled": 31958, + "Ġelves": 31959, + "ĠWond": 31960, + "burst": 31961, + "Ġcamoufl": 31962, + "ĠBLACK": 31963, + "Ġconditioned": 31964, + "çī": 31965, + "answered": 31966, + "Ġcompulsory": 31967, + "ascist": 31968, + "Ġpodcasts": 31969, + "ĠFrankfurt": 31970, + "bnb": 31971, + "Ġneoliberal": 31972, + "ĠKeyboard": 31973, + "ĠBelle": 31974, + "warm": 31975, + "Ġtrusts": 31976, + "Ġinsured": 31977, + "ĠBucc": 31978, + "usable": 31979, + "607": 31980, + "ĠPlains": 31981, + "Ġ1890": 31982, + "Ġsabotage": 31983, + "Ġlodged": 31984, + "felt": 31985, + "Ġga": 31986, + "ĠNarc": 31987, + "ĠSalem": 31988, + "Ġseventy": 31989, + "ĠBlank": 31990, + "pocket": 31991, + "Ġwhisper": 31992, + "Ġmating": 31993, + "omics": 31994, + "ĠSalman": 31995, + "ĠKad": 31996, + "Ġangered": 31997, + "Ġcollisions": 31998, + "Ġextraordinarily": 31999, + "Ġcoercion": 32000, + "Ghost": 32001, + "birds": 32002, + "èĢ": 32003, + "kok": 32004, + "Ġpermissible": 32005, + "avorable": 32006, + "Ġpointers": 32007, + "Ġdissip": 32008, + "aci": 32009, + "Ġtheatrical": 32010, + "ĠCosmic": 32011, + "Ġforgetting": 32012, + "Ġfinalized": 32013, + "大": 32014, + "yout": 32015, + "library": 32016, + "Ġbooming": 32017, + "ĠBelieve": 32018, + "ĠTeacher": 32019, + "ĠLiv": 32020, + "ĠGOODMAN": 32021, + "ĠDominican": 32022, + "ORED": 32023, + "ĠParties": 32024, + "Ġprecipitation": 32025, + "ĠSlot": 32026, + "Roy": 32027, + "ĠCombined": 32028, + "Ġintegrating": 32029, + "Ġchrome": 32030, + "Ġintestinal": 32031, + "ĠRebell": 32032, + "Ġmatchups": 32033, + "Ġblockbuster": 32034, + "ĠLoren": 32035, + "ĠLevy": 32036, + "Ġpreaching": 32037, + "ĠSending": 32038, + "ĠPurpose": 32039, + "rax": 32040, + "fif": 32041, + "Ġauthoritative": 32042, + "ĠPET": 32043, + "astical": 32044, + "Ġdishon": 32045, + "Ġchatting": 32046, + "Ġ\"$:/": 32047, + "Connection": 32048, + "Ġrecreate": 32049, + "Ġdelinqu": 32050, + "Ġbroth": 32051, + "ĠDirty": 32052, + "ĠAdmin": 32053, + "zman": 32054, + "Ġscholarships": 32055, + "Ġ253": 32056, + "contact": 32057, + "alsa": 32058, + "767": 32059, + "creen": 32060, + "abbage": 32061, + "Ġ1915": 32062, + "Ġblended": 32063, + "Ġalarmed": 32064, + "Language": 32065, + "356": 32066, + "Ġblends": 32067, + "ĠChanged": 32068, + "Wolf": 32069, + "Ġhepat": 32070, + "Creating": 32071, + "Ġpersecut": 32072, + "Ġsweetness": 32073, + "arte": 32074, + "Ġforfeiture": 32075, + "ĠRoberto": 32076, + "impro": 32077, + "NFL": 32078, + "ĠMagnet": 32079, + "Detailed": 32080, + "Ġinsignificant": 32081, + "ĠPOLIT": 32082, + "ĠBBQ": 32083, + "ĠCPS": 32084, + "Ġseaw": 32085, + "aminer": 32086, + "mL": 32087, + "endif": 32088, + "finals": 32089, + "Ġ265": 32090, + "uish": 32091, + "Ġ})": 32092, + "ĠProblems": 32093, + "Ġemblem": 32094, + "Ġseriousness": 32095, + "Ġparsing": 32096, + "Ġsubstitution": 32097, + "Ġpressured": 32098, + "Ġrecycled": 32099, + "aleb": 32100, + "Ruby": 32101, + "Ġproficiency": 32102, + "Driver": 32103, + 
"ĠWester": 32104, + ":'": 32105, + "AFTA": 32106, + "Ġmantle": 32107, + "ĠClayton": 32108, + "flag": 32109, + "Ġpractitioner": 32110, + "covered": 32111, + "ĠStruct": 32112, + "addafi": 32113, + "425": 32114, + "ĠTownship": 32115, + "ĠHydro": 32116, + "Louis": 32117, + "343": 32118, + "Ġcondo": 32119, + "ĠTao": 32120, + "Ġutilization": 32121, + "Ġnausea": 32122, + "ĠDems": 32123, + "ridges": 32124, + "pause": 32125, + "Ġformulas": 32126, + "Ġchallenger": 32127, + "376": 32128, + "Ġdefective": 32129, + "ĠRailway": 32130, + "ĠPubMed": 32131, + "Ġyogurt": 32132, + "lbs": 32133, + "ĠNorfolk": 32134, + "OPE": 32135, + "ĠMoody": 32136, + "Ġdistributor": 32137, + "Ġscrolls": 32138, + "Ġextracts": 32139, + "Stan": 32140, + "Ġviability": 32141, + "Ġexposes": 32142, + "Ġstarvation": 32143, + "ĠSteps": 32144, + "ĠDodd": 32145, + "few": 32146, + "STD": 32147, + "332": 32148, + "Ġclosures": 32149, + "Ġcomplementary": 32150, + "ĠSasha": 32151, + "umpy": 32152, + "Ġmonet": 32153, + "Ġarticulate": 32154, + "ĠDoct": 32155, + "killer": 32156, + "Ġscrim": 32157, + "Ġ264": 32158, + "Ġprostitutes": 32159, + "Ġsevered": 32160, + "Ġattachments": 32161, + "Ġcooled": 32162, + "Lev": 32163, + "ĠFalk": 32164, + "fail": 32165, + "Ġpoliceman": 32166, + "ĠDag": 32167, + "Ġprayed": 32168, + "ĠKernel": 32169, + "Ġclut": 32170, + "Ġcath": 32171, + "Ġanomaly": 32172, + "Storm": 32173, + "emaker": 32174, + "ĠBreakfast": 32175, + "uli": 32176, + "oire": 32177, + "JJ": 32178, + "hz": 32179, + "Operation": 32180, + "ĠSick": 32181, + "354": 32182, + "ĠGuatemala": 32183, + "Rate": 32184, + "Ġexposures": 32185, + "faces": 32186, + "ĠArchae": 32187, + "raf": 32188, + "ĠMia": 32189, + "Ġ2025": 32190, + "Ġopaque": 32191, + "Ġdisguised": 32192, + "ĠHeadquarters": 32193, + "Sah": 32194, + "Ġpots": 32195, + "978": 32196, + "ĠMalf": 32197, + "Ġfrowned": 32198, + "Ġpoisonous": 32199, + "ĠConvers": 32200, + "eeks": 32201, + "Ġcrab": 32202, + ".\"\"": 32203, + "Ġtreason": 32204, + "Ġranc": 32205, + "Ġescalating": 32206, + "Ġwarr": 32207, + "Ġmobs": 32208, + "Ġlamps": 32209, + "ĠSunshine": 32210, + "ĠBrunswick": 32211, + "Phones": 32212, + "Ġspelled": 32213, + "ĠSkip": 32214, + "Ġ2050": 32215, + "Ġ1911": 32216, + "ĠPluto": 32217, + "ĠAmend": 32218, + "Ġmeats": 32219, + "387": 32220, + "Ġstomp": 32221, + "ĠZhou": 32222, + "ĠLeviathan": 32223, + "ĠHazard": 32224, + "adv": 32225, + "ĠOrwell": 32226, + "Ġaloud": 32227, + "Ġbumper": 32228, + "ĠAnarch": 32229, + "ubuntu": 32230, + "ĠSerious": 32231, + "fitting": 32232, + "ĠOptional": 32233, + "ĠCecil": 32234, + "REAM": 32235, + "Ġserotonin": 32236, + "Ġcultivate": 32237, + "agogue": 32238, + "}\\": 32239, + "Ġmosques": 32240, + "ĠSunny": 32241, + "Ġreactive": 32242, + "revolution": 32243, + "ĠLup": 32244, + "ĠFedora": 32245, + "Ġdefenseman": 32246, + "ĠVID": 32247, + "istine": 32248, + "Ġdrowning": 32249, + "ĠBroadcasting": 32250, + "Ġthriller": 32251, + "ĠScy": 32252, + "Ġaccelerating": 32253, + "Ġdirects": 32254, + "odied": 32255, + "bike": 32256, + "duration": 32257, + "Ġpainfully": 32258, + "Redd": 32259, + "Ġproductions": 32260, + "Ġgag": 32261, + "Ġwhist": 32262, + "Ġsock": 32263, + "Ġinfinitely": 32264, + "ĠConcern": 32265, + "ĠCitadel": 32266, + "Ġlieu": 32267, + "Ġcandles": 32268, + "ogeneous": 32269, + "arger": 32270, + "Ġheavenly": 32271, + "inflammatory": 32272, + "Performance": 32273, + "Cs": 32274, + "ructose": 32275, + "azaki": 32276, + "Ġpessim": 32277, + "Ġinference": 32278, + "Ġpowd": 32279, + "ĠZoe": 32280, + "Ġpaints": 32281, + "Ġdazz": 32282, + "pta": 32283, + "-----------": 
32284, + "Ġinspir": 32285, + "ĠExperimental": 32286, + "ĠKnife": 32287, + "regor": 32288, + "bors": 32289, + "Ġshowers": 32290, + "romeda": 32291, + "Ġsaint": 32292, + "Ġbenign": 32293, + "ĠJiang": 32294, + "Ġenvisioned": 32295, + "Ġshroud": 32296, + "IFT": 32297, + "HO": 32298, + "Ġshuff": 32299, + "ĠICC": 32300, + "Ġsegreg": 32301, + "Ġrevisit": 32302, + "ighthouse": 32303, + "Li": 32304, + "Ġsubstrate": 32305, + "ĠSeas": 32306, + "ĠReward": 32307, + "ĠHep": 32308, + "ĠBrass": 32309, + "sbm": 32310, + "Ġeliminates": 32311, + "Ġstamina": 32312, + "ĠVAT": 32313, + "ĠLoan": 32314, + "Ġconstraint": 32315, + "Ġappropriated": 32316, + "Ġpes": 32317, + "ĠALE": 32318, + "ranging": 32319, + "Ġ404": 32320, + "392": 32321, + "Ġintellectuals": 32322, + "achu": 32323, + "Ġrestructuring": 32324, + "ĠLevin": 32325, + "Ġrunes": 32326, + "Ġdelightful": 32327, + "Ġcarbohydrates": 32328, + "ĠModels": 32329, + "ĠExpo": 32330, + "Ġtransporting": 32331, + "alloc": 32332, + "Ġringing": 32333, + "Samsung": 32334, + "Ġscarcely": 32335, + "ĠURLs": 32336, + "ĠMAS": 32337, + "Ġprototypes": 32338, + "Ġnarrator": 32339, + "ĠCPUs": 32340, + "cdn": 32341, + "ĠBarton": 32342, + "Ġdecidedly": 32343, + "ĠShu": 32344, + "ixir": 32345, + "ocious": 32346, + "ĠMyst": 32347, + "Nintendo": 32348, + "Ġreuse": 32349, + "Ġforgiven": 32350, + "Few": 32351, + "inical": 32352, + "nat": 32353, + "Ġseamless": 32354, + "ĠEva": 32355, + "ĠEVE": 32356, + "ĠJO": 32357, + "landers": 32358, + "Ġsofter": 32359, + "negie": 32360, + "Ġtransient": 32361, + "Ġorbital": 32362, + "Ġfulfil": 32363, + "ĠKom": 32364, + "Hopefully": 32365, + "Ġdynamically": 32366, + "ĠHunger": 32367, + "åĽ": 32368, + "ĠArmenia": 32369, + "elman": 32370, + "berto": 32371, + "Ġpige": 32372, + "ĠIDs": 32373, + "limit": 32374, + "Ġveins": 32375, + "Ġsoaring": 32376, + "packs": 32377, + "Golden": 32378, + "ĠCrab": 32379, + "istor": 32380, + "ĠRPM": 32381, + "Ġ$$": 32382, + "gression": 32383, + "Ġjihadist": 32384, + "Ġgamble": 32385, + "Ġcareg": 32386, + "Ġinflated": 32387, + "Face": 32388, + "ĠFirearms": 32389, + "ĠEmmanuel": 32390, + "âĿ": 32391, + "Ġshocks": 32392, + "grab": 32393, + "Ġsplend": 32394, + "ĠHPV": 32395, + "abortion": 32396, + "Above": 32397, + "Entity": 32398, + "players": 32399, + "Ġcommenced": 32400, + "ulence": 32401, + "Ġfulfillment": 32402, + "Ġembodiments": 32403, + "ĠWelfare": 32404, + "Ġhail": 32405, + "Ġ<@": 32406, + "tten": 32407, + "Ġcatcher": 32408, + "ĠJazeera": 32409, + "Ġvolcano": 32410, + "Ġstabilize": 32411, + "ĠHandler": 32412, + "Ġintensified": 32413, + "ĠAbrams": 32414, + "Ġhumiliation": 32415, + "paced": 32416, + "605": 32417, + "ĠCentOS": 32418, + "Specific": 32419, + "Ġheed": 32420, + "ĠCAM": 32421, + "ĠGalile": 32422, + "Die": 32423, + "Ġabolished": 32424, + "ĠThomson": 32425, + "ĠTeachers": 32426, + "ĠWass": 32427, + "jong": 32428, + "ĠISBN": 32429, + "ĠAllies": 32430, + "shake": 32431, + "å·": 32432, + "vict": 32433, + "Howard": 32434, + "Ġdeem": 32435, + "Ġexceedingly": 32436, + "ĠSmartstocks": 32437, + "ibe": 32438, + "Ġdoorway": 32439, + "Ġcompeted": 32440, + "igmat": 32441, + "Ġnationalists": 32442, + "Ġgroom": 32443, + "ĠKeen": 32444, + "Ġdisposable": 32445, + "decl": 32446, + "ĠTolkien": 32447, + "ĠScheme": 32448, + "Ġbiod": 32449, + "Ġavid": 32450, + "ĠElon": 32451, + "agar": 32452, + "ĠTSA": 32453, + "Roman": 32454, + "Ġartificially": 32455, + "Ġadvisors": 32456, + "XL": 32457, + "ĠInferno": 32458, + "366": 32459, + "Ġtedious": 32460, + "ĠPhotography": 32461, + "ĠCarrie": 32462, + "Ġtrope": 32463, + "ĠSandra": 32464, + 
"Ġdecimal": 32465, + "Queen": 32466, + "ĠGundam": 32467, + "ĠOM": 32468, + "otech": 32469, + "NBA": 32470, + "Ġ1932": 32471, + "Ġentrenched": 32472, + "ĠMarion": 32473, + "Ġfraternity": 32474, + "Labour": 32475, + "Henry": 32476, + "Ġlatitude": 32477, + "Either": 32478, + "Ġenhances": 32479, + "ĠPotential": 32480, + "Ġshines": 32481, + "idad": 32482, + "Ġbreadth": 32483, + "Ġcapacities": 32484, + "ĠðŁĻĤ": 32485, + "ĠBronx": 32486, + "Ġsexes": 32487, + "Ġdifferentiation": 32488, + "Ġheavyweight": 32489, + "ĠTaj": 32490, + "dra": 32491, + "Ġmigrate": 32492, + "Ġexhaustion": 32493, + "ĠRUN": 32494, + "elsius": 32495, + "ĠCuomo": 32496, + "Ġguitars": 32497, + "Ġclones": 32498, + "ĠSomew": 32499, + "ĠPry": 32500, + "-------------": 32501, + "Ġwarranted": 32502, + "cycles": 32503, + "Ġsalvage": 32504, + "Ġdisks": 32505, + "RANT": 32506, + "ĠNGOs": 32507, + "ĠMartian": 32508, + "\":[{\"": 32509, + "Ġaddicts": 32510, + "ojure": 32511, + "illet": 32512, + "Ġamazingly": 32513, + "artments": 32514, + "pixel": 32515, + "ĠGPUs": 32516, + "Layout": 32517, + "è£": 32518, + "ĠTamil": 32519, + "ĠBasil": 32520, + "Ġimpartial": 32521, + "ĠStructure": 32522, + "fork": 32523, + "bryce": 32524, + "Ġridge": 32525, + "ĠHamburg": 32526, + "rious": 32527, + "Ġblitz": 32528, + "cigarettes": 32529, + "Ġcanned": 32530, + "402": 32531, + "Ġironically": 32532, + "Ġcompassionate": 32533, + "ĠHawkins": 32534, + ".#": 32535, + "ĠCathedral": 32536, + "Ġrallied": 32537, + "internal": 32538, + "Ġquota": 32539, + "stakes": 32540, + "TEXT": 32541, + "mom": 32542, + "Ġcompletes": 32543, + "Ġ238": 32544, + "Ġshrug": 32545, + "ãĥij": 32546, + "ĠNinth": 32547, + "Ġrevise": 32548, + "ĠProvider": 32549, + "Ġtreacher": 32550, + "Ġquasi": 32551, + "ĠPRES": 32552, + "Ġdeposition": 32553, + "Ġconfidentiality": 32554, + "issors": 32555, + "Ġimbalance": 32556, + "Ġspanning": 32557, + "Ġangular": 32558, + "ĠCul": 32559, + "communication": 32560, + "ĠNora": 32561, + "ĠGenius": 32562, + "opter": 32563, + "Ġsacked": 32564, + "Spot": 32565, + "Ġfinely": 32566, + "ĠCHR": 32567, + "282": 32568, + "waves": 32569, + "Palest": 32570, + "ĠRohing": 32571, + "NL": 32572, + "è¿": 32573, + "Ġshitty": 32574, + "ĠScalia": 32575, + "475": 32576, + "Progress": 32577, + "Ġreferencing": 32578, + "Ġclassrooms": 32579, + "abee": 32580, + "Ġsod": 32581, + "hesion": 32582, + "708": 32583, + "ĠZuckerberg": 32584, + "ĠFinish": 32585, + "ĠScotia": 32586, + "ĠSavior": 32587, + "ĠInstallation": 32588, + "antha": 32589, + "(-": 32590, + "Ġ302": 32591, + "ĠPunk": 32592, + "Ġcrater": 32593, + "youtu": 32594, + "Ġroast": 32595, + "Ġinfluencing": 32596, + "Ġdup": 32597, + "ĠJR": 32598, + "ĠGrav": 32599, + "Ġstature": 32600, + "Ġbathrooms": 32601, + "Aside": 32602, + "Wiki": 32603, + "mean": 32604, + "ĠZak": 32605, + "ĠOnes": 32606, + "ĠNath": 32607, + "Ġhypert": 32608, + "Ġcommencement": 32609, + "Civil": 32610, + "Ġmoderately": 32611, + "Ġdistributors": 32612, + "Ġbreastfeeding": 32613, + "Ġ980": 32614, + "ĠSik": 32615, + "ĠCig": 32616, + "ĠAMER": 32617, + "RIP": 32618, + "ĠCareer": 32619, + "usting": 32620, + "Ġmessed": 32621, + "Ġeh": 32622, + "ĠJensen": 32623, + "/$": 32624, + "Ġblackmail": 32625, + "Ġconversions": 32626, + "Ġscientifically": 32627, + "Ġmantra": 32628, + "paying": 32629, + "Ġivory": 32630, + "ĠCourts": 32631, + "OUGH": 32632, + "auntlet": 32633, + "Serial": 32634, + "Brow": 32635, + "ĠHundreds": 32636, + "323": 32637, + "Ġpee": 32638, + "Ġlinux": 32639, + "Ġsubmer": 32640, + "ĠPrincipal": 32641, + "485": 32642, + "ĠDSL": 32643, + "ĠCousins": 32644, + 
"Ġdoctrines": 32645, + "ĠAthletics": 32646, + "Ġ315": 32647, + "ĠKarma": 32648, + "Ġattent": 32649, + "urger": 32650, + "Ġprescribe": 32651, + "Ġencaps": 32652, + "ĠCame": 32653, + "Ġsecretive": 32654, + "ĠCrimes": 32655, + "dn": 32656, + "Clean": 32657, + "ĠEgyptians": 32658, + "ĠCarpenter": 32659, + "Ġll": 32660, + "Hum": 32661, + "ĠMilo": 32662, + "Ġcapitalists": 32663, + "Ġbriefed": 32664, + "Twe": 32665, + "ĠBasin": 32666, + "elvet": 32667, + "Mos": 32668, + "Ġplunge": 32669, + "ĠKaiser": 32670, + "ĠFuj": 32671, + "illin": 32672, + "Ġsafeguards": 32673, + "Ġoste": 32674, + "ĠOpportunity": 32675, + "ĠMafia": 32676, + "ĠCalling": 32677, + "apa": 32678, + "urban": 32679, + "brush": 32680, + "illard": 32681, + "cé": 32682, + "intelligence": 32683, + "ĠLob": 32684, + "ĠDruid": 32685, + "Ġsmoother": 32686, + "Ġfooting": 32687, + "Ġmotorists": 32688, + "arcity": 32689, + "Ġmasculinity": 32690, + "Ġmism": 32691, + "Ġabdominal": 32692, + "ĠTavern": 32693, + "ĠRoh": 32694, + "Ġescapes": 32695, + "signed": 32696, + "Anthony": 32697, + "Ġsacrificing": 32698, + "Ġintimacy": 32699, + "Ġanterior": 32700, + "ĠKod": 32701, + "Ġmotif": 32702, + "Ġgraz": 32703, + "Ġvisualization": 32704, + "Ġguitarist": 32705, + "ĠTrotsky": 32706, + "magic": 32707, + "Dar": 32708, + "ĠMori": 32709, + "Ġwards": 32710, + "Ġtoilets": 32711, + "lest": 32712, + "Ġteleport": 32713, + "ĠSundays": 32714, + "ĠPlat": 32715, + "ETS": 32716, + "ĠeSports": 32717, + "Patrick": 32718, + "ĠKatherine": 32719, + "enko": 32720, + "Ġhassle": 32721, + "ĠMick": 32722, + "ggles": 32723, + "Ġhob": 32724, + "aintain": 32725, + "Ġairborne": 32726, + "Ġspans": 32727, + "Ġchili": 32728, + "Ġaperture": 32729, + "Ġvolunteered": 32730, + "ĠIncident": 32731, + "ĠFres": 32732, + "ĠVeteran": 32733, + "aughtered": 32734, + "ingo": 32735, + "Ġuninsured": 32736, + "CLOSE": 32737, + "Ġfuse": 32738, + "Ġerotic": 32739, + "Ġadvertise": 32740, + "raising": 32741, + "Texture": 32742, + "Ġattends": 32743, + "ĠREAL": 32744, + "uddled": 32745, + "Ġsmoot": 32746, + "Ġ305": 32747, + "ĠWillis": 32748, + "Ġblond": 32749, + "Analysis": 32750, + "ĠVT": 32751, + "onica": 32752, + "Ġstronghold": 32753, + "RF": 32754, + "NM": 32755, + ".>>": 32756, + "Ġprosperous": 32757, + "Ġboasted": 32758, + "292": 32759, + "ĠManufacturing": 32760, + "PRESS": 32761, + "gren": 32762, + "Ġpharmacy": 32763, + "ĠRockefeller": 32764, + "kai": 32765, + "Ġthumbs": 32766, + "ĠHut": 32767, + "Ġmotherboard": 32768, + "Ġguardians": 32769, + "ĠAlter": 32770, + "llular": 32771, + "Ġshack": 32772, + "Ġwisely": 32773, + "Ġbackbone": 32774, + "erva": 32775, + "Ġsuicides": 32776, + "ĠMcGregor": 32777, + "ijah": 32778, + "Emer": 32779, + "ĠBrav": 32780, + "Ġdesignate": 32781, + "POST": 32782, + "produced": 32783, + "Ġcleansing": 32784, + "irlwind": 32785, + "existent": 32786, + "ĠHumph": 32787, + "ĠPayne": 32788, + "Ġvested": 32789, + "Å¡": 32790, + "Ġstringent": 32791, + "iona": 32792, + "Ġunsub": 32793, + "Ġsummed": 32794, + "ĠHercules": 32795, + "subject": 32796, + "ĠRagnar": 32797, + "ĠNos": 32798, + "Ġcharacterization": 32799, + "Ġsavvy": 32800, + "ĠDawson": 32801, + "ĠCasino": 32802, + "Ġfri": 32803, + "ĠBarrier": 32804, + "Ġmisinformation": 32805, + "Ġinsulation": 32806, + "Ġcorridors": 32807, + "Ġairplanes": 32808, + "ĠNoct": 32809, + "ahi": 32810, + "Ġ1916": 32811, + "kb": 32812, + "armac": 32813, + "Ġshun": 32814, + "Ġschema": 32815, + "Ġhorrified": 32816, + "Ġ239": 32817, + "aunders": 32818, + "NB": 32819, + "iates": 32820, + "erity": 32821, + "ĠShard": 32822, + "Ġrarity": 32823, + "Ġgrouped": 
32824, + "ĠGhana": 32825, + "against": 32826, + "ĠBiological": 32827, + "ĠAware": 32828, + "owell": 32829, + "ÏĦ": 32830, + "ĠBeau": 32831, + "shaw": 32832, + "Hack": 32833, + "ĠJulius": 32834, + "USS": 32835, + "olson": 32836, + "auna": 32837, + "cru": 32838, + "ĠMaurice": 32839, + "ĠIk": 32840, + "Ġsequencing": 32841, + "Ġradicals": 32842, + "Ġ(?,": 32843, + "virtual": 32844, + "Ġanyways": 32845, + "Ġreperc": 32846, + "Ġhandlers": 32847, + "Ġhesitant": 32848, + "éĥ": 32849, + "ĠMF": 32850, + "plementation": 32851, + "associated": 32852, + "Ġcampaigned": 32853, + "ĠYue": 32854, + "utations": 32855, + "ĠYoga": 32856, + "Ġsimmer": 32857, + "Ġrods": 32858, + "Ġmelody": 32859, + "Ġconvoy": 32860, + "videos": 32861, + "Ġscreened": 32862, + "Neg": 32863, + "ochemical": 32864, + "Ġ())": 32865, + "Ġultras": 32866, + "Ġantip": 32867, + "ĠIslanders": 32868, + "704": 32869, + "Ġfetish": 32870, + "Ġridiculously": 32871, + "ĠKart": 32872, + "Ġmitochondrial": 32873, + "Ġinterfering": 32874, + "Builder": 32875, + "Ġoverfl": 32876, + "Ġacne": 32877, + "ĠMud": 32878, + "ĠKerr": 32879, + "flex": 32880, + "ĠPostal": 32881, + "ĠBaltic": 32882, + "477": 32883, + "ĠPersons": 32884, + "ourage": 32885, + "HB": 32886, + "ĠMuse": 32887, + "ĠImmortal": 32888, + "ĠDriving": 32889, + "Ġpetitions": 32890, + "Ġsubscript": 32891, + "Ġsorce": 32892, + "ĠProcessor": 32893, + "uton": 32894, + "Sony": 32895, + "Ġphon": 32896, + "Ġraced": 32897, + "ĠAnthrop": 32898, + "Ġdaytime": 32899, + "ĠExercise": 32900, + "Adding": 32901, + "Ġengages": 32902, + "ĠQualcomm": 32903, + "Ġmiracles": 32904, + "Ġmemes": 32905, + "ĠDrink": 32906, + "ĠOrioles": 32907, + "Ġhairs": 32908, + "ĠPolar": 32909, + "athom": 32910, + "Ġslippery": 32911, + "ĠRemy": 32912, + "Ġcaramel": 32913, + "ĠYEAR": 32914, + "Ġalk": 32915, + "Ign": 32916, + "aution": 32917, + "ĠMerlin": 32918, + "ĠCran": 32919, + "Ġapologies": 32920, + "Ġ410": 32921, + "Ġouting": 32922, + "ĠMemories": 32923, + "appointed": 32924, + "Ġcountered": 32925, + "uld": 32926, + "posing": 32927, + "Ġfirewall": 32928, + "ĠWast": 32929, + "ĠWet": 32930, + "worked": 32931, + "seller": 32932, + "Ġrepealed": 32933, + "ereo": 32934, + "assuming": 32935, + "BLIC": 32936, + "mite": 32937, + "ĠCEOs": 32938, + "ĠChapel": 32939, + "elligent": 32940, + "________________________": 32941, + "Dog": 32942, + "Ġwart": 32943, + "Ġsubscriber": 32944, + "sports": 32945, + "Ġbegged": 32946, + "ĠMV": 32947, + "Ġsemif": 32948, + "ethical": 32949, + "Ġpreach": 32950, + "Ġrevital": 32951, + "Ġpunitive": 32952, + "Ġshortcuts": 32953, + "Ġinstituted": 32954, + "ĠWarsaw": 32955, + "Ġabdomen": 32956, + "ĠKING": 32957, + "Ġsuperintendent": 32958, + "Ġfry": 32959, + "ĠGeo": 32960, + "TOR": 32961, + "Ġcontradictions": 32962, + "aptic": 32963, + "Ġlandscapes": 32964, + "bugs": 32965, + "Ġclust": 32966, + "Ġvolley": 32967, + "cribed": 32968, + "Ġtandem": 32969, + "Ġrobes": 32970, + "WHAT": 32971, + "Ġpromoter": 32972, + "Ġeloqu": 32973, + "reviewed": 32974, + "ĠDK": 32975, + "ĠPlato": 32976, + "Ġfps": 32977, + "Tank": 32978, + "ĠDerrick": 32979, + "Ġprioritize": 32980, + "asper": 32981, + "ĠHonduras": 32982, + "ĠCompleted": 32983, + "nec": 32984, + "Ġmog": 32985, + "nir": 32986, + "ĠMayo": 32987, + "DEF": 32988, + "stall": 32989, + "inness": 32990, + "ĠVolkswagen": 32991, + "Ġprecaution": 32992, + "ĠMell": 32993, + "iak": 32994, + "istries": 32995, + "Ġ248": 32996, + "Ġoverlapping": 32997, + "Senate": 32998, + "ĠEnhance": 32999, + "resy": 33000, + "racial": 33001, + "ORTS": 33002, + "ĠMormons": 33003, + "Strong": 33004, + 
"ĠCoch": 33005, + "Mexico": 33006, + "ĠMaduro": 33007, + "Ġjars": 33008, + "Ġcane": 33009, + "Wik": 33010, + "olla": 33011, + "ifference": 33012, + "Ġphysicist": 33013, + "ĠMaggie": 33014, + "Ġ285": 33015, + "Ġdepiction": 33016, + "ĠMcLaren": 33017, + "Ju": 33018, + "Ġslows": 33019, + "Ġcommissioners": 33020, + "ĠWillow": 33021, + "ĠExplos": 33022, + "hovah": 33023, + "Ġtechnician": 33024, + "Ġhomicides": 33025, + "ĠFlav": 33026, + "ĠTruman": 33027, + "Ġ10000": 33028, + "uctor": 33029, + "Ġshader": 33030, + "Newsletter": 33031, + "457": 33032, + "Ġrever": 33033, + "Ġhardened": 33034, + "Ġwhereabouts": 33035, + "Ġredevelop": 33036, + "Ġcarbs": 33037, + "Ġtravers": 33038, + "Ġsquirrel": 33039, + "Ġfollower": 33040, + "Ġsings": 33041, + "508": 33042, + "Ġrabbits": 33043, + "emonium": 33044, + "Ġdocumenting": 33045, + "Ġmisunderstood": 33046, + ")'": 33047, + "Rick": 33048, + "ggies": 33049, + "Ġpremie": 33050, + "Ġskating": 33051, + "Ġpassports": 33052, + "Ġfists": 33053, + "ageddon": 33054, + "Haw": 33055, + "ACP": 33056, + "080": 33057, + "ĠThoughts": 33058, + "ĠCarlson": 33059, + "Ġpriesthood": 33060, + "hua": 33061, + "Ġdungeons": 33062, + "ĠLoans": 33063, + "Ġantis": 33064, + "Ġfamiliarity": 33065, + "ĠSabb": 33066, + "opal": 33067, + "ĠInk": 33068, + "strike": 33069, + "Ġcram": 33070, + "Ġlegalized": 33071, + "Ġcuisine": 33072, + "Ġfibre": 33073, + "Travel": 33074, + "ĠMonument": 33075, + "ODY": 33076, + "ethy": 33077, + "Ġinterstate": 33078, + "ĠPUR": 33079, + "emporary": 33080, + "ĠArabian": 33081, + "developed": 33082, + "Ġsaddle": 33083, + "Ġgithub": 33084, + "ĠOffer": 33085, + "ĠISP": 33086, + "rolet": 33087, + "ĠSUPER": 33088, + "ĠDenis": 33089, + "Ġmultiplier": 33090, + "Ġstirred": 33091, + "Interestingly": 33092, + "Ġcustomary": 33093, + "Ġbilled": 33094, + "hex": 33095, + "Ġmultiplied": 33096, + "Ġflipping": 33097, + "ĠCrosby": 33098, + "Ġfundamentals": 33099, + "iae": 33100, + "ĠPlayed": 33101, + "ĠAtom": 33102, + "amazon": 33103, + "ĠFlam": 33104, + "eez": 33105, + "activated": 33106, + "Ġtablespoon": 33107, + "Ġliberalism": 33108, + "ĠPalin": 33109, + "ĠPatel": 33110, + "Num": 33111, + "ĠTAM": 33112, + "Ġsurn": 33113, + "ĠReloaded": 33114, + "Ġcoined": 33115, + "\"],": 33116, + "ĠClash": 33117, + "ĠAgu": 33118, + "Ġpragmatic": 33119, + "ĠActivate": 33120, + "Ġ802": 33121, + "Ġtrailers": 33122, + "Ġsilhou": 33123, + "Ġprobes": 33124, + "Ġcircus": 33125, + "ĠBain": 33126, + "ĠLindsay": 33127, + "ĠAbbey": 33128, + "Delivery": 33129, + "Ġconcession": 33130, + "Ġgastro": 33131, + "ĠSprite": 33132, + "ÄŁ": 33133, + "andel": 33134, + "Ġgimm": 33135, + "Ġautobi": 33136, + "ĠTurtle": 33137, + "Ġwonderfully": 33138, + "ĠHaram": 33139, + "ĠWorldwide": 33140, + "ĠHandle": 33141, + "Ġtheorists": 33142, + "Ġsleek": 33143, + "ĠZhu": 33144, + "ographically": 33145, + "EGA": 33146, + "ĠOwners": 33147, + "aths": 33148, + "ĠAntarctic": 33149, + "natal": 33150, + "=\"\"": 33151, + "flags": 33152, + "````": 33153, + "Ġsul": 33154, + "Kh": 33155, + "Ġpotassium": 33156, + "Ġlineman": 33157, + "Ġcereal": 33158, + "ĠSeasons": 33159, + "Ġ2022": 33160, + "Ġmathematic": 33161, + "Ġastronomers": 33162, + "professional": 33163, + "Ġfares": 33164, + "cknowled": 33165, + "Ġchi": 33166, + "Ġyoungsters": 33167, + "Ġmistakenly": 33168, + "Ġhemisphere": 33169, + "ĠDivinity": 33170, + "rone": 33171, + "Ġ\",": 33172, + "rings": 33173, + "Ġattracts": 33174, + "vana": 33175, + "å¹": 33176, + "CAP": 33177, + "Ġplaylist": 33178, + "Ġporch": 33179, + "ãģ£": 33180, + "Ġincorporates": 33181, + "Ġsoak": 33182, + 
"Ġasserting": 33183, + "ĠTerrorism": 33184, + "ĠPablo": 33185, + "Ja": 33186, + "cester": 33187, + "Ġfearing": 33188, + "ĠPrayer": 33189, + "Ġescalated": 33190, + "GW": 33191, + "Ġrobe": 33192, + "ĠBrighton": 33193, + "acists": 33194, + "ĠSymphony": 33195, + "ĠDwarf": 33196, + "ĠParade": 33197, + "ĠLego": 33198, + "Ġinexpl": 33199, + "Ġlords": 33200, + "leaf": 33201, + "RAG": 33202, + "liber": 33203, + "Ġcigars": 33204, + "ĠJehovah": 33205, + "606": 33206, + "WINDOWS": 33207, + "ĠLiberia": 33208, + "ebus": 33209, + "Heavy": 33210, + "Ġlubric": 33211, + "ĠRW": 33212, + "anguages": 33213, + "Ġnarrowed": 33214, + "computer": 33215, + "ĠEmber": 33216, + "Ġmurdering": 33217, + "Ġdownstream": 33218, + "ĠTuls": 33219, + "ĠTables": 33220, + "Topic": 33221, + "ĠAccuracy": 33222, + "=/": 33223, + "lost": 33224, + "ĠRei": 33225, + "Ġprogresses": 33226, + "bear": 33227, + "Ġestablishments": 33228, + "Justin": 33229, + "ĠPeach": 33230, + "ĠGomez": 33231, + "å¿": 33232, + "ĠTriangle": 33233, + "Ident": 33234, + "ĠHive": 33235, + "Resources": 33236, + "Ġmixes": 33237, + "ĠAssuming": 33238, + "Mu": 33239, + "Ġhypoc": 33240, + "Ġsane": 33241, + "ĠWan": 33242, + "idious": 33243, + "Success": 33244, + "Ġio": 33245, + "Angel": 33246, + "Ġdangerously": 33247, + "ĠCreature": 33248, + "WORK": 33249, + ":[": 33250, + "ĠKatrina": 33251, + "Listener": 33252, + "Miller": 33253, + "ĠIdlib": 33254, + "hang": 33255, + "Ġcircumvent": 33256, + "href": 33257, + "Ġcelestial": 33258, + "ĠWeeks": 33259, + "ĠPug": 33260, + "ĠDalton": 33261, + "Ġsubpoena": 33262, + "uku": 33263, + "Ġpersisted": 33264, + "pei": 33265, + "olding": 33266, + "ĠDocuments": 33267, + "ĠHast": 33268, + "ĠCENT": 33269, + "Ġprimer": 33270, + "Ġsynonymous": 33271, + "Ġnib": 33272, + "ombs": 33273, + "Ġnotation": 33274, + "ĠDish": 33275, + "ĠAtmosp": 33276, + "Ġforbid": 33277, + "ĠANG": 33278, + "pattern": 33279, + "los": 33280, + "Ġprojectiles": 33281, + "brown": 33282, + ".\",": 33283, + "ĠVenom": 33284, + "Ġfiercely": 33285, + "ublished": 33286, + "ĠUran": 33287, + "ĠNicarag": 33288, + "410": 33289, + "ĠCAL": 33290, + "OTOS": 33291, + "ĠMiracle": 33292, + "ĠEnchant": 33293, + "Ġguarding": 33294, + "append": 33295, + "Attach": 33296, + "Ġleveled": 33297, + "Ġcondoms": 33298, + "ihilation": 33299, + "649": 33300, + "Ġnightmares": 33301, + "ĠTHEY": 33302, + "ĠSTART": 33303, + "ĠKinn": 33304, + "Ġroommate": 33305, + "Ġhygiene": 33306, + "opping": 33307, + "Job": 33308, + "Ġlvl": 33309, + "ĠVER": 33310, + "ĠKeeping": 33311, + "abetic": 33312, + "Ġformatting": 33313, + "erala": 33314, + "Ġrevisions": 33315, + "Ġresurg": 33316, + "Tel": 33317, + "ĠGoodman": 33318, + "353": 33319, + "pod": 33320, + "Ġindisp": 33321, + "ĠTranslation": 33322, + "Ġgown": 33323, + "ĠMund": 33324, + "Ġcis": 33325, + "Ġbystand": 33326, + "collect": 33327, + "ĠPunjab": 33328, + "actively": 33329, + "ĠGamb": 33330, + "tell": 33331, + "Ġimporting": 33332, + "gencies": 33333, + "Ġlocom": 33334, + "ĠBrill": 33335, + "Holy": 33336, + "ĠBerger": 33337, + "Ġshowdown": 33338, + "Ġresponders": 33339, + "ILY": 33340, + "Ġtakedown": 33341, + "leted": 33342, + "Ġmattered": 33343, + "Ġpredictive": 33344, + "Ġoverlay": 33345, + "GPU": 33346, + "ĠVick": 33347, + "Ġconveyed": 33348, + "Tab": 33349, + "peer": 33350, + "Scan": 33351, + "Ġdefensively": 33352, + "vae": 33353, + "Ġapproving": 33354, + "Ġtiers": 33355, + "ĠVia": 33356, + "querade": 33357, + "ĠSaudis": 33358, + "Ġdemolished": 33359, + "ĠProphe": 33360, + "Ġmono": 33361, + "Ġhospitality": 33362, + "HAM": 33363, + "ĠAriel": 33364, + "MOD": 
33365, + "ĠTorah": 33366, + "Ġblah": 33367, + "ĠBelarus": 33368, + "erential": 33369, + "ĠTuc": 33370, + "Ġbanker": 33371, + "397": 33372, + "Ġmosquit": 33373, + "ĠScientist": 33374, + "ĠMusical": 33375, + "Ġhust": 33376, + "Shift": 33377, + "Ġtorment": 33378, + "Ġstandoff": 33379, + "Educ": 33380, + "ĠFog": 33381, + "Ġamplifier": 33382, + "Shape": 33383, + "Instance": 33384, + "ĠCritics": 33385, + "Ġdaemon": 33386, + "Houston": 33387, + "Ġmattress": 33388, + "ĠIDF": 33389, + "Ġobscene": 33390, + "ĠAmer": 33391, + "hetti": 33392, + "Ġcompiling": 33393, + "352": 33394, + "verett": 33395, + "ĠReduction": 33396, + "istration": 33397, + "ĠBlessed": 33398, + "ĠBachelor": 33399, + "316": 33400, + "Ġprank": 33401, + "ĠVulcan": 33402, + "dding": 33403, + "Ġmourning": 33404, + "ĠQuint": 33405, + "ĠBlaster": 33406, + "testing": 33407, + "Ġsediment": 33408, + ">>>": 33409, + "ĠEternity": 33410, + "ĠWHERE": 33411, + "ĠMaze": 33412, + "Ġreacting": 33413, + "ĠAlv": 33414, + "omsday": 33415, + "ĠCRA": 33416, + "Ġtranslator": 33417, + "Ġbogus": 33418, + "atu": 33419, + "Website": 33420, + "olls": 33421, + "Ġbaptism": 33422, + "Ġsibling": 33423, + "ĠAutumn": 33424, + "vez": 33425, + "ãģ®é": 33426, + "guards": 33427, + "Georg": 33428, + "assadors": 33429, + "ĠFreud": 33430, + "Ġcontinents": 33431, + "ĠRegistry": 33432, + "Bernie": 33433, + "ĸļ士": 33434, + "Ġtolerant": 33435, + "ĠUW": 33436, + "Ġhorribly": 33437, + "995": 33438, + "ĠMIDI": 33439, + "Ġimpatient": 33440, + "ocado": 33441, + "eri": 33442, + "ĠWorst": 33443, + "ĠNorris": 33444, + "ĠTalking": 33445, + "Ġdefends": 33446, + "ensable": 33447, + "Ġ2021": 33448, + "Ġanatomy": 33449, + "Lew": 33450, + "Ġdrawer": 33451, + "ĠCanberra": 33452, + "Ġpatriotic": 33453, + "é¾įåĸļ士": 33454, + "ĠAvg": 33455, + "ARM": 33456, + "Ġundisclosed": 33457, + "Ġfarewell": 33458, + "459": 33459, + "bable": 33460, + "ĠAllison": 33461, + "OLOG": 33462, + "Ġconco": 33463, + "tight": 33464, + "ĠACPI": 33465, + "ĠMines": 33466, + "lich": 33467, + "ĠâĶľ": 33468, + "represented": 33469, + "200000": 33470, + "Ġenthusiast": 33471, + "OTS": 33472, + "bil": 33473, + "ĠIngredients": 33474, + "Ġinventor": 33475, + "ĠMySQL": 33476, + "³³³": 33477, + "ĠABOUT": 33478, + "within": 33479, + "Ġmk": 33480, + "Bul": 33481, + "ĠFake": 33482, + "Ġdraconian": 33483, + "Wa": 33484, + "helm": 33485, + "ĠTerran": 33486, + "erville": 33487, + "Ġcommonplace": 33488, + "SIZE": 33489, + "Ġ\"<": 33490, + "replace": 33491, + "ographs": 33492, + "ĠSELECT": 33493, + "incible": 33494, + "ĠMostly": 33495, + "ĠSheffield": 33496, + "ĠIDE": 33497, + "uggle": 33498, + "Ġcitations": 33499, + "hurst": 33500, + "ĠUnix": 33501, + "Ġunleash": 33502, + "ĠPiper": 33503, + "ĠNano": 33504, + "Ġsuccumb": 33505, + "Ġreluctance": 33506, + "Ġ2500": 33507, + "ĠMerchant": 33508, + "Ġwiret": 33509, + "Ġcombos": 33510, + "ĠBirthday": 33511, + "Ġcharcoal": 33512, + "ĠUPS": 33513, + "ĠFairfax": 33514, + "Ġdriveway": 33515, + "ĠTek": 33516, + "ĠPitch": 33517, + "overe": 33518, + "Ġtechnicians": 33519, + "ĠActual": 33520, + "flation": 33521, + "ĠFiscal": 33522, + "ĠEmpty": 33523, + "anamo": 33524, + "Ġmagnesium": 33525, + "Ġslut": 33526, + "Ġgrowers": 33527, + "Investigators": 33528, + "():": 33529, + "ĠSatellite": 33530, + "ĠKeynes": 33531, + "missive": 33532, + "lane": 33533, + "Ġborough": 33534, + "344": 33535, + "ĠTEAM": 33536, + "ĠBethesda": 33537, + "CV": 33538, + "hower": 33539, + "ĠRAD": 33540, + "Ġchant": 33541, + "ĠRiy": 33542, + "Ġcompositions": 33543, + "Ġmildly": 33544, + "Ġmeddling": 33545, + "Ġagility": 33546, + 
"aneers": 33547, + "501": 33548, + "Ġsynth": 33549, + "linger": 33550, + "291": 33551, + "Ġexclaimed": 33552, + "Party": 33553, + "Ġcontamin": 33554, + "ĠManor": 33555, + "ĠRespond": 33556, + "Ġpraising": 33557, + "Ġmanners": 33558, + "fleet": 33559, + "Summer": 33560, + "ĠLynd": 33561, + "ĠDefinitely": 33562, + "grim": 33563, + "Ġbowling": 33564, + "stri": 33565, + "çĽ": 33566, + "ynt": 33567, + "Ġmandates": 33568, + "DIV": 33569, + "Ġreconcile": 33570, + "views": 33571, + "ĠDamon": 33572, + "vette": 33573, + "Flo": 33574, + "ĠGreatest": 33575, + "ilon": 33576, + "icia": 33577, + "Ġportrayal": 33578, + "Ġcushion": 33579, + "504": 33580, + "1979": 33581, + "ossal": 33582, + "Applic": 33583, + "scription": 33584, + "Ġmitigation": 33585, + "ATS": 33586, + "pac": 33587, + "Ġerased": 33588, + "Ġdeficiencies": 33589, + "ĠHollande": 33590, + "ĠXu": 33591, + "Ġbred": 33592, + "Ġpregnancies": 33593, + "femin": 33594, + "Ġemph": 33595, + "Ġplanners": 33596, + "Ġoutper": 33597, + "uttering": 33598, + "Ġperpetrator": 33599, + "Ġmotto": 33600, + "ĠEllison": 33601, + "ĠNEVER": 33602, + "Ġadmittedly": 33603, + "ARI": 33604, + "ĠAzerbaijan": 33605, + "Ġmillisec": 33606, + "Ġcombustion": 33607, + "ĠBottle": 33608, + "ĠLund": 33609, + "ĠPs": 33610, + "ĠDress": 33611, + "Ġfabricated": 33612, + "Ġbattered": 33613, + "Ġsidel": 33614, + "ĠNotting": 33615, + "Foreign": 33616, + "ĠJerome": 33617, + "020": 33618, + "ĠArbit": 33619, + "Ġknots": 33620, + "ĠRIGHT": 33621, + "Moving": 33622, + "ãģĻ": 33623, + "Ġsurgeries": 33624, + "Ġcourthouse": 33625, + "Ġmastered": 33626, + "Ġhovering": 33627, + "ĠBran": 33628, + "ĠAlison": 33629, + "Ġsafest": 33630, + "military": 33631, + "Ġbullied": 33632, + "Ġbarrage": 33633, + "Reader": 33634, + "ESE": 33635, + "ĠGeographic": 33636, + "Tools": 33637, + "314": 33638, + "ĠGeek": 33639, + "roth": 33640, + "glers": 33641, + "ĠFIN": 33642, + "Ïģ": 33643, + "ĠAston": 33644, + "altern": 33645, + "488": 33646, + "Ġveterin": 33647, + "Gamer": 33648, + "Ġintel": 33649, + "renches": 33650, + "Shield": 33651, + "Ġamnesty": 33652, + "ĠBhar": 33653, + "Ġpiled": 33654, + "Ġhonorable": 33655, + "ĠInstitutes": 33656, + "Ġsoaked": 33657, + "Ġcoma": 33658, + "ĠEFF": 33659, + "341": 33660, + "bytes": 33661, + "ĠGmail": 33662, + "lein": 33663, + "ĠCanadiens": 33664, + "material": 33665, + "Il": 33666, + "Ġinstructors": 33667, + "ĠKY": 33668, + "Ġconceive": 33669, + "ubb": 33670, + "ĠPossible": 33671, + "Ġeasing": 33672, + "ĠChristina": 33673, + "Ġcaric": 33674, + "ĠHDR": 33675, + "ROM": 33676, + "Ġshovel": 33677, + "delete": 33678, + "Ġpuff": 33679, + "ĠChanging": 33680, + "Ġseamlessly": 33681, + "Attribute": 33682, + "Ġacquisitions": 33683, + "akery": 33684, + "ĠEF": 33685, + "Ġautistic": 33686, + "ĠTakes": 33687, + "ĠPowder": 33688, + "ĠStir": 33689, + "510": 33690, + "ĠBubble": 33691, + "settings": 33692, + "ĠFowler": 33693, + "Ġmustard": 33694, + "Ġmoreover": 33695, + "Ġcopyrighted": 33696, + "ĠLEDs": 33697, + "1500": 33698, + "æī": 33699, + "ĠHIS": 33700, + "enf": 33701, + "Ġcustod": 33702, + "ĠHuck": 33703, + "Gi": 33704, + "Ġimg": 33705, + "Answer": 33706, + "Ct": 33707, + "jay": 33708, + "ĠInfrastructure": 33709, + "Ġfederally": 33710, + "Loc": 33711, + "Ġmicrobes": 33712, + "Ġoverrun": 33713, + "dds": 33714, + "otent": 33715, + "adiator": 33716, + ">>>>>>>>": 33717, + "Ġtornado": 33718, + "Ġadjud": 33719, + "Ġintrigued": 33720, + "Ġsi": 33721, + "ĠRevelation": 33722, + "progress": 33723, + "Ġburglary": 33724, + "ĠSaiyan": 33725, + "ĠKathy": 33726, + "Ġserpent": 33727, + "ĠAndreas": 33728, 
+ "Ġcompel": 33729, + "essler": 33730, + "ĠPlastic": 33731, + "ĠAdvent": 33732, + "ĠPositive": 33733, + "ĠQt": 33734, + "ĠHindus": 33735, + "registered": 33736, + "ularity": 33737, + "Ġrighteousness": 33738, + "Ġdemonic": 33739, + "uitive": 33740, + "ĠBDS": 33741, + "ĠGregg": 33742, + "cia": 33743, + "ĠCrusade": 33744, + "ĠSinai": 33745, + "WARE": 33746, + "+(": 33747, + "Ġmell": 33748, + "Ġderail": 33749, + "yards": 33750, + "Ast": 33751, + "Ġnoticeably": 33752, + "ĠOber": 33753, + "Ram": 33754, + "Ġunnoticed": 33755, + "Ġseq": 33756, + "avage": 33757, + "Ts": 33758, + "Ġ640": 33759, + "Ġconcede": 33760, + "Ġ])": 33761, + "Fill": 33762, + "Ġcaptivity": 33763, + "ĠImprovement": 33764, + "ĠCrusader": 33765, + "araoh": 33766, + "MAP": 33767, + "æĹ": 33768, + "Ġstride": 33769, + "always": 33770, + "Fly": 33771, + "Nit": 33772, + "Ġalgae": 33773, + "ĠCooking": 33774, + "ĠDoors": 33775, + "Malley": 33776, + "Ġpolicemen": 33777, + "ãģį": 33778, + "Ġastronaut": 33779, + "accessible": 33780, + "495": 33781, + "ĠRAW": 33782, + "cliffe": 33783, + "udicrous": 33784, + "Ġdepended": 33785, + "alach": 33786, + "Ġventures": 33787, + "rake": 33788, + "Ġtits": 33789, + "ĠHou": 33790, + "Ġcondom": 33791, + "ormonal": 33792, + "Ġindent": 33793, + "Ġuploading": 33794, + "Footnote": 33795, + "Important": 33796, + "Ġ271": 33797, + "Ġmindful": 33798, + "Ġcontends": 33799, + "Cra": 33800, + "Ġcalibr": 33801, + "ĠOECD": 33802, + "plugin": 33803, + "Fat": 33804, + "ĠISS": 33805, + "ĠDynamics": 33806, + "ansen": 33807, + "686": 33808, + "'),": 33809, + "Ġsprite": 33810, + "Ġhandheld": 33811, + "ĠHipp": 33812, + "=~=~": 33813, + "Trust": 33814, + "Ġsemantics": 33815, + "ĠBundes": 33816, + "ĠReno": 33817, + "ĠLiterature": 33818, + "sense": 33819, + "Gary": 33820, + "ĠAeg": 33821, + "ĠTrin": 33822, + "EEK": 33823, + "Ġcleric": 33824, + "ĠSSH": 33825, + "Ġchrist": 33826, + "Ġinvading": 33827, + "ibu": 33828, + "Ġenum": 33829, + "aura": 33830, + "Ġallege": 33831, + "ĠIncredible": 33832, + "BBC": 33833, + "Ġthru": 33834, + "Ġsailed": 33835, + "Ġemulate": 33836, + "Ġinsecurity": 33837, + "Ġcrou": 33838, + "Ġaccommodations": 33839, + "Ġincompetent": 33840, + "Ġslips": 33841, + "ĠEarthqu": 33842, + "sama": 33843, + "ILLE": 33844, + "ĠiPhones": 33845, + "asaki": 33846, + "Ġbye": 33847, + "Ġard": 33848, + "Ġextras": 33849, + "Ġslaughtered": 33850, + "Ġcrowdfunding": 33851, + "resso": 33852, + "Ġfilib": 33853, + "ĠERROR": 33854, + "ĠTLS": 33855, + "egg": 33856, + "ĠItal": 33857, + "Ġenlist": 33858, + "ĠCatalonia": 33859, + "ĠScots": 33860, + "Ġsergeant": 33861, + "Ġdissolve": 33862, + "NH": 33863, + "Ġstandings": 33864, + "rique": 33865, + "IQ": 33866, + "Ġbeneficiary": 33867, + "Ġaquarium": 33868, + "YouTube": 33869, + "ĠPowerShell": 33870, + "Ġbrightest": 33871, + "ĠWarrant": 33872, + "Sold": 33873, + "Writing": 33874, + "Ġbeginnings": 33875, + "ĠReserved": 33876, + "ĠLatinos": 33877, + "heading": 33878, + "Ġ440": 33879, + "Ġrooftop": 33880, + "ATING": 33881, + "Ġ390": 33882, + "VPN": 33883, + "Gs": 33884, + "kernel": 33885, + "turned": 33886, + "Ġpreferable": 33887, + "Ġturnovers": 33888, + "ĠHels": 33889, + "Sa": 33890, + "ĠShinji": 33891, + "veh": 33892, + "ĠMODULE": 33893, + "Viol": 33894, + "Ġexiting": 33895, + "Ġjab": 33896, + "ĠVanilla": 33897, + "Ġacron": 33898, + "ĠGap": 33899, + "bern": 33900, + "Ak": 33901, + "ĠMcGu": 33902, + "Ġendlessly": 33903, + "ĠFarage": 33904, + "ĠNoel": 33905, + "Va": 33906, + "MK": 33907, + "Ġbrute": 33908, + "ĠKru": 33909, + "ĠESV": 33910, + "ĠOlivia": 33911, + "âĢł": 33912, + "ĠKaf": 
33913, + "Ġtrusting": 33914, + "Ġhots": 33915, + "324": 33916, + "Ġmalaria": 33917, + "Ġjson": 33918, + "Ġpounding": 33919, + "ortment": 33920, + "Country": 33921, + "Ġpostponed": 33922, + "Ġunequiv": 33923, + "?),": 33924, + "ĠRooney": 33925, + "udding": 33926, + "ĠLeap": 33927, + "urrence": 33928, + "shapeshifter": 33929, + "ĠHAS": 33930, + "osate": 33931, + "Ġcavern": 33932, + "Ġconservatism": 33933, + "ĠBAD": 33934, + "Ġmileage": 33935, + "Ġarresting": 33936, + "Vaults": 33937, + "Ġmixer": 33938, + "Democratic": 33939, + "ĠBenson": 33940, + "Ġauthored": 33941, + "8000": 33942, + "Ġproactive": 33943, + "ĠSpiritual": 33944, + "tre": 33945, + "Ġincarcerated": 33946, + "ĠSort": 33947, + "Ġpeaked": 33948, + "Ġwielding": 33949, + "reciation": 33950, + "×Ļ×": 33951, + "Patch": 33952, + "ĠEmmy": 33953, + "Ġexqu": 33954, + "tto": 33955, + "ĠRatio": 33956, + "ĠPicks": 33957, + "ĠGry": 33958, + "phant": 33959, + "Ġfret": 33960, + "Ġethn": 33961, + "Ġarchived": 33962, + "%-": 33963, + "cases": 33964, + "ĠBlaze": 33965, + "Ġimb": 33966, + "cv": 33967, + "yss": 33968, + "imony": 33969, + "Ġcountdown": 33970, + "Ġawakening": 33971, + "ĠTunisia": 33972, + "ĠRefer": 33973, + "ĠMJ": 33974, + "Ġunnatural": 33975, + "ĠCarnegie": 33976, + "izen": 33977, + "ĠNuggets": 33978, + "hess": 33979, + "Ġevils": 33980, + "647": 33981, + "Ġintroductory": 33982, + "loving": 33983, + "ĠMcMahon": 33984, + "Ġambiguity": 33985, + "Label": 33986, + "ĠAlmighty": 33987, + "Ġcoloring": 33988, + "ĠClaus": 33989, + "setting": 33990, + "NULL": 33991, + "ĠFavorite": 33992, + "ĠSIG": 33993, + ">(": 33994, + "ĠShiva": 33995, + "ĠMayer": 33996, + "Ġstormed": 33997, + "ĠCoverage": 33998, + "weapons": 33999, + "igham": 34000, + "Ġunanswered": 34001, + "Ġleve": 34002, + "Ġcoy": 34003, + "cas": 34004, + "bags": 34005, + "asured": 34006, + "Seattle": 34007, + "ĠSantorum": 34008, + "serious": 34009, + "Ġcourageous": 34010, + "ĠSoup": 34011, + "Ġconfiscated": 34012, + "Ġ///": 34013, + "Ġunconventional": 34014, + "Ġmoms": 34015, + "ĠRohingya": 34016, + "ĠOrchestra": 34017, + "ĠPotion": 34018, + "Ġdiscredit": 34019, + "ĠFIL": 34020, + "fixed": 34021, + "ĠDeer": 34022, + "doi": 34023, + "ĠDimension": 34024, + "Ġbureaucrats": 34025, + "eteen": 34026, + "ĠactionGroup": 34027, + "ohm": 34028, + "Ġbumps": 34029, + "ĠUtility": 34030, + "Ġsubmarines": 34031, + "renheit": 34032, + "research": 34033, + "ĠShapiro": 34034, + "Ġsketches": 34035, + "Ġdeceptive": 34036, + "ĠVil": 34037, + "esame": 34038, + "ĠEssentially": 34039, + "Ġrampage": 34040, + "isky": 34041, + "Ġmuttered": 34042, + "thritis": 34043, + "Ġ236": 34044, + "fet": 34045, + "bars": 34046, + "Ġpupil": 34047, + "ĠThou": 34048, + "oS": 34049, + "song": 34050, + "Ġfractured": 34051, + "Ġrevert": 34052, + "picture": 34053, + "Ġcriterion": 34054, + "usher": 34055, + "Ġrepercussions": 34056, + "ĠVintage": 34057, + "ĠSuperintendent": 34058, + "Officers": 34059, + "Ġflagged": 34060, + "Ġblames": 34061, + "Ġinverse": 34062, + "ographers": 34063, + "Ġmakeshift": 34064, + "Ġdevoid": 34065, + "Ġfossils": 34066, + "ĠAristotle": 34067, + "ĠFunds": 34068, + "Ġdepleted": 34069, + "ĠFlu": 34070, + "ĠYuan": 34071, + "Ġwoes": 34072, + "Ġlipid": 34073, + "Ġsitu": 34074, + "requisites": 34075, + "Ġfurnish": 34076, + "ĠSamar": 34077, + "Ġshameful": 34078, + "Ġadversely": 34079, + "Ġadept": 34080, + "Ġremorse": 34081, + "Ġmurderous": 34082, + "uckles": 34083, + "ĠESL": 34084, + "Ġ314": 34085, + "sent": 34086, + "Ġredef": 34087, + "ĠCache": 34088, + "ĠPurs": 34089, + "igans": 34090, + "Ġ460": 34091, + 
"Ġprescriptions": 34092, + "Ġfres": 34093, + "Fuck": 34094, + "ocrates": 34095, + "Twenty": 34096, + "ĠWeird": 34097, + "ĠToggle": 34098, + "ĠCalled": 34099, + "itizens": 34100, + "Ġpoultry": 34101, + "Ġharvesting": 34102, + "ãĤ¦ãĤ¹": 34103, + "Bottom": 34104, + "Ġcautioned": 34105, + "tn": 34106, + "396": 34107, + "ĠNikki": 34108, + "Ġevaluations": 34109, + "Ġharassing": 34110, + "Ġbindings": 34111, + "ĠMonetary": 34112, + "Ġhitters": 34113, + "Ġadversary": 34114, + "unts": 34115, + "Ġsetback": 34116, + "Ġencrypt": 34117, + "ĠCait": 34118, + "Ġlows": 34119, + "enges": 34120, + "ĠNorn": 34121, + "Ġbulbs": 34122, + "Ġbottled": 34123, + "ĠVoyager": 34124, + "317": 34125, + "Ġspheres": 34126, + "politics": 34127, + "Ġsubtract": 34128, + "Ġsensations": 34129, + "Ġappalling": 34130, + "Ġ316": 34131, + "Ġenvironmentally": 34132, + "ĠSTEM": 34133, + "Ġpublishes": 34134, + "560": 34135, + "Ġdiligence": 34136, + "484": 34137, + "Ġadvises": 34138, + "Ġpetrol": 34139, + "Ġimagining": 34140, + "Ġpatrols": 34141, + "ĠInteger": 34142, + "ĠAshes": 34143, + "actus": 34144, + "ĠRadiant": 34145, + "ĠLT": 34146, + "itability": 34147, + "htaking": 34148, + "Setting": 34149, + "Ġnuanced": 34150, + "ĠReef": 34151, + "ĠDevelopers": 34152, + "Ni": 34153, + "pieces": 34154, + "990": 34155, + "License": 34156, + "Ġlowers": 34157, + "ĠOttoman": 34158, + "327": 34159, + "ooo": 34160, + "Ġquitting": 34161, + "markets": 34162, + "Behind": 34163, + "Ġbasin": 34164, + "Ġdocs": 34165, + "anie": 34166, + "flash": 34167, + "ctl": 34168, + "Ġcivilized": 34169, + "ĠFukushima": 34170, + "\"],\"": 34171, + "ĠKS": 34172, + "ĠHonestly": 34173, + "arat": 34174, + "Ġconstructs": 34175, + "ĠLans": 34176, + "ĠDire": 34177, + "ĠLIKE": 34178, + "ĠTrouble": 34179, + "Ġwithholding": 34180, + "ĠOblivion": 34181, + "Ġsanity": 34182, + "anya": 34183, + "Const": 34184, + "Ġgrocer": 34185, + "ĠCelsius": 34186, + "Ġrecounted": 34187, + "ĠWife": 34188, + "Border": 34189, + "atered": 34190, + "happy": 34191, + "Ġspoiler": 34192, + "Ġlogically": 34193, + "Hall": 34194, + "Ġsucceeding": 34195, + "Ġpolymorph": 34196, + "Ġaxes": 34197, + "ĠShotgun": 34198, + "ĠSlim": 34199, + "ĠPrinciples": 34200, + "ĠLeth": 34201, + "arta": 34202, + "Ġscor": 34203, + "Screenshot": 34204, + "Ġrelaxation": 34205, + "#$#$": 34206, + "Ġdeterrent": 34207, + "iddy": 34208, + "Ġpowerless": 34209, + "Ġlesbians": 34210, + "Ġchords": 34211, + "ĠEdited": 34212, + "selected": 34213, + "Ġseparatists": 34214, + "0002": 34215, + "Ġairspace": 34216, + "Ġturnaround": 34217, + "Ġcunning": 34218, + "PATH": 34219, + "Poly": 34220, + "Ġbombed": 34221, + "Ġtion": 34222, + "xs": 34223, + "Ġwithhold": 34224, + "Ġwaged": 34225, + "ĠLiberties": 34226, + "Flag": 34227, + "Ġcomforting": 34228, + "454": 34229, + "ĠIris": 34230, + "arers": 34231, + "Ġrag": 34232, + "Ġrelocated": 34233, + "ĠGuarant": 34234, + "Ġstrategically": 34235, + "Ġgamma": 34236, + "uberty": 34237, + "ĠLockheed": 34238, + "gres": 34239, + "Ġgrilled": 34240, + "ĠLowe": 34241, + "stats": 34242, + "ĠRocks": 34243, + "Ġsensing": 34244, + "Ġrenting": 34245, + "ĠGeological": 34246, + "اØ": 34247, + "otrop": 34248, + "Ġsew": 34249, + "Ġimproperly": 34250, + "486": 34251, + "Ġâĸł": 34252, + "Ġstarving": 34253, + "ĠBj": 34254, + "Discussion": 34255, + "328": 34256, + "ĠCombo": 34257, + "ĠFixes": 34258, + "NAT": 34259, + "Ġstriving": 34260, + "thora": 34261, + "Ġharvested": 34262, + "ĠPing": 34263, + "Ġplayful": 34264, + "Ġavenues": 34265, + "Ġoccupational": 34266, + "Ġwakes": 34267, + "ĠCourier": 34268, + "Ġdrummer": 34269, + 
"ĠBrowser": 34270, + "ĠHouth": 34271, + "itu": 34272, + "Ġapparel": 34273, + "paste": 34274, + "Ġhunted": 34275, + "ĠSecondly": 34276, + "lain": 34277, + "XY": 34278, + "ĠPIN": 34279, + "icons": 34280, + "Ġcocktails": 34281, + "Ġsizable": 34282, + "Ġhurdles": 34283, + "estinal": 34284, + "ĠRecreation": 34285, + "Ġeco": 34286, + "648": 34287, + "ĠDied": 34288, + "mint": 34289, + "Ġfingerprints": 34290, + "Ġdispose": 34291, + "ĠBosnia": 34292, + "tsy": 34293, + "2200": 34294, + "Ġinspected": 34295, + "ĠFou": 34296, + "Ġfuss": 34297, + "Ġambush": 34298, + "ĠRak": 34299, + "Ġmanifested": 34300, + "Prosecut": 34301, + "Ġsuffice": 34302, + "rences": 34303, + "Ġcompensated": 34304, + "ĠCyrus": 34305, + "Ġgenus": 34306, + "ĠWolverine": 34307, + "ĠTrends": 34308, + "Ġhikes": 34309, + "ĠSeen": 34310, + "Ġenrol": 34311, + "Cold": 34312, + "Ġpolitely": 34313, + "ĠSlav": 34314, + "ĠRupert": 34315, + "Ġeyewitness": 34316, + "ĠAlto": 34317, + "Ġuncomp": 34318, + "Ġposterior": 34319, + "Must": 34320, + "ĠHerz": 34321, + "Ġprogressively": 34322, + "Ġ234": 34323, + "Ġindifference": 34324, + "ĠCunningham": 34325, + "Ġacademia": 34326, + "Ġsewer": 34327, + "Ġastounding": 34328, + "ĠAES": 34329, + "rather": 34330, + "Ġeldest": 34331, + "Ġclimbs": 34332, + "ĠAdds": 34333, + "Ġoutcry": 34334, + "Ġcontag": 34335, + "ĠHouses": 34336, + "Ġpept": 34337, + "ĠMelania": 34338, + "interested": 34339, + "ĠUCH": 34340, + "ĠRoots": 34341, + "ĠHubbard": 34342, + "ĠTBD": 34343, + "ĠRomanian": 34344, + "filename": 34345, + "Stone": 34346, + "ĠImpl": 34347, + "Ġchromosome": 34348, + "Cle": 34349, + "dx": 34350, + "Ġscrambled": 34351, + "ĠPt": 34352, + "Ġ242": 34353, + "OPLE": 34354, + "Ġtremendously": 34355, + "Street": 34356, + "Ġcraving": 34357, + "Ġbundled": 34358, + "ĠRG": 34359, + "pipe": 34360, + "Ġinjuring": 34361, + "Ġarcane": 34362, + "Particip": 34363, + "ĠHeroic": 34364, + "sty": 34365, + "Ġtopping": 34366, + "ĠTempest": 34367, + "rentices": 34368, + "bh": 34369, + "Ġparanoia": 34370, + "ĠUnicode": 34371, + "Ġegregious": 34372, + "Ġ\\'": 34373, + "ĠOswald": 34374, + "Ġgravel": 34375, + "ĠSimpsons": 34376, + "Ġbland": 34377, + "ĠGuantanamo": 34378, + "Writer": 34379, + "liners": 34380, + "ĠDice": 34381, + "JC": 34382, + "Ġparity": 34383, + "Ġsided": 34384, + "Ġ237": 34385, + "ĠPyrrha": 34386, + "atters": 34387, + "dk": 34388, + "Fine": 34389, + "compan": 34390, + "Ġformulated": 34391, + "ĠIdol": 34392, + "ilers": 34393, + "hemoth": 34394, + "ĠFav": 34395, + "Ġintrusion": 34396, + "Ġcarrots": 34397, + "ĠLayer": 34398, + "ĠHacker": 34399, + "Ġ----------------": 34400, + "Ġmoderation": 34401, + "éģ": 34402, + "ococ": 34403, + "Ġcharacterize": 34404, + "ĠTeresa": 34405, + "Ġsocioeconomic": 34406, + "Ġperk": 34407, + "ĠParticipation": 34408, + "training": 34409, + "ĠPaulo": 34410, + "phys": 34411, + "Ġtrustworthy": 34412, + "Ġembodied": 34413, + "ĠMerch": 34414, + "currency": 34415, + "ĠPriority": 34416, + "Ġteasing": 34417, + "Ġabsorbing": 34418, + "Ġunfinished": 34419, + "ĠComparison": 34420, + "Ġdisple": 34421, + "writers": 34422, + "Ġprofessions": 34423, + "ĠPenguin": 34424, + "Ġangrily": 34425, + "ĠLINK": 34426, + "688": 34427, + "ĠCorrespond": 34428, + "Ġprevailed": 34429, + "Ġcartel": 34430, + "lp": 34431, + "asms": 34432, + "ĠRedemption": 34433, + "ĠIslamists": 34434, + "effects": 34435, + "dose": 34436, + "ĠLatter": 34437, + "ĠHalifax": 34438, + "Ġvas": 34439, + "ĠTopics": 34440, + "ĠNamed": 34441, + "advertising": 34442, + "zza": 34443, + "ICES": 34444, + "Ġretarded": 34445, + "achable": 34446, + "ĠPuppet": 
34447, + "ĠItemLevel": 34448, + "Ġretract": 34449, + "Ġidentifiable": 34450, + "Aaron": 34451, + "ĠBuster": 34452, + "sol": 34453, + "helle": 34454, + "assemb": 34455, + "Hope": 34456, + "ranged": 34457, + "Ba": 34458, + "ĠPurch": 34459, + "éĢ": 34460, + "ĠSiri": 34461, + "Ġarrivals": 34462, + "Ġ1912": 34463, + "Ġshortened": 34464, + "Ġ312": 34465, + "Ġdiscrepancy": 34466, + "ĠTemperature": 34467, + "ĠWalton": 34468, + "Ġkinderg": 34469, + "polit": 34470, + "Ġremix": 34471, + "Ġconnectors": 34472, + "ãĥĺãĥ©": 34473, + "ĠKazakhstan": 34474, + "dominated": 34475, + "Ġsugars": 34476, + "imble": 34477, + "ĠPanic": 34478, + "ĠDemand": 34479, + "ĠColony": 34480, + "onen": 34481, + "ĠMER": 34482, + "775": 34483, + "uria": 34484, + "azaar": 34485, + "ĠDegree": 34486, + "Pri": 34487, + "Ġsunshine": 34488, + "Ġ251": 34489, + "Ġpsychedelic": 34490, + "Ġdigitally": 34491, + "ĠBraun": 34492, + "Ġshimmer": 34493, + "Ġshave": 34494, + "ĠTelesc": 34495, + "ĠAstral": 34496, + "ĠVenezuelan": 34497, + "ĠOG": 34498, + "Ġcrawling": 34499, + "Integ": 34500, + "ĠFeather": 34501, + "Ġunfolding": 34502, + "Ġappropriation": 34503, + "Ġè£ıè": 34504, + "ĠMobility": 34505, + "ĠNey": 34506, + "-.": 34507, + "bilt": 34508, + "LIN": 34509, + "ĠTube": 34510, + "ĠConversely": 34511, + "Ġkeyboards": 34512, + "ĠCao": 34513, + "Ġoverth": 34514, + "Ġlaure": 34515, + ">>\\": 34516, + "ĠViper": 34517, + "acha": 34518, + "Offset": 34519, + "ĠRaleigh": 34520, + "ĠJae": 34521, + "Jordan": 34522, + "jp": 34523, + "Ġtotalitarian": 34524, + "Connector": 34525, + "Ġobserves": 34526, + "ĠSpartan": 34527, + "ĠImmediately": 34528, + "ĠScal": 34529, + "Cool": 34530, + "Ġtaps": 34531, + "Ġroar": 34532, + "Past": 34533, + "Ġchars": 34534, + "ĠBender": 34535, + "ĠSheldon": 34536, + "Ġpainter": 34537, + "Ġbeacon": 34538, + "ĠCreatures": 34539, + "Ġdownturn": 34540, + "Ġhinder": 34541, + "ĠAndromeda": 34542, + "ÃĽ": 34543, + "ccoli": 34544, + "ĠFitness": 34545, + "etrical": 34546, + "Ġutilizes": 34547, + "Ġsenate": 34548, + "Ġensemble": 34549, + "Ġcheers": 34550, + "TW": 34551, + "Ġaffluent": 34552, + "kil": 34553, + "rylic": 34554, + "ordering": 34555, + "Computer": 34556, + "Ġgruesome": 34557, + "ostics": 34558, + "ĠUbisoft": 34559, + "ĠKelley": 34560, + "Ġwrench": 34561, + "Ġbourgeoisie": 34562, + "IBLE": 34563, + "ĠPreston": 34564, + "worn": 34565, + "arist": 34566, + "reating": 34567, + "Ġstained": 34568, + "arine": 34569, + "Ġslime": 34570, + "ENN": 34571, + "Ġchests": 34572, + "Ġgroundwater": 34573, + "annot": 34574, + "ĠTray": 34575, + "ĠLocke": 34576, + "ĠCTR": 34577, + "Ġdudes": 34578, + "ĠExternal": 34579, + "ĠDecoder": 34580, + "Ġparamed": 34581, + "ĠMedline": 34582, + "809": 34583, + "ĠDinner": 34584, + "rupal": 34585, + "gz": 34586, + "ĠGum": 34587, + "ĠDemo": 34588, + "jee": 34589, + "Ġdh": 34590, + "berman": 34591, + "archs": 34592, + "Ġenqu": 34593, + "ĠEpstein": 34594, + "Ġdevastation": 34595, + "Ġfriendships": 34596, + "ĠArd": 34597, + "Ġ231": 34598, + "ĠRubin": 34599, + "ĠDistance": 34600, + "Ġspurred": 34601, + "Ġdossier": 34602, + "Ġoverlooking": 34603, + "\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\": 34604, + "Forest": 34605, + "ĠComes": 34606, + "\\\",": 34607, + "ĠIranians": 34608, + "Ġfixtures": 34609, + "Laughs": 34610, + "Ġcurry": 34611, + "ĠKingston": 34612, + "Ġsquash": 34613, + "Ġcatalogue": 34614, + "Ġabnormalities": 34615, + "Ġdigestive": 34616, + ".........": 34617, + "Ġsubordinate": 34618, + "ogly": 34619, + "Ġ249": 34620, + "Middle": 34621, + "Ġmassac": 34622, + "Ġburgers": 34623, + "Ġdownstairs": 34624, + "Ġ1931": 
34625, + "394": 34626, + "ĠVG": 34627, + "Ġlasers": 34628, + "ĠSikh": 34629, + "ĠAlexa": 34630, + "derived": 34631, + "Ġcyclist": 34632, + "ãģ®éŃĶ": 34633, + "oneliness": 34634, + "!!!!!!!!": 34635, + "Ġbuffs": 34636, + "legate": 34637, + "Ġraping": 34638, + "Ġrecommending": 34639, + "rored": 34640, + "Ġmulticultural": 34641, + "unique": 34642, + "Ġbusinessmen": 34643, + "Ġuneasy": 34644, + "ĠMAP": 34645, + "Ġdispersed": 34646, + "cipline": 34647, + "Jess": 34648, + "ĠKerala": 34649, + "å§": 34650, + "Ġabstraction": 34651, + "Surv": 34652, + "Uh": 34653, + "Ġprinters": 34654, + "ija": 34655, + "owder": 34656, + "Ġanalogous": 34657, + "ĠASP": 34658, + "afer": 34659, + "Ġunfolded": 34660, + "Ġleveling": 34661, + "Ġbreached": 34662, + "ĠHearing": 34663, + "Ġnat": 34664, + "Ġtranslating": 34665, + "critical": 34666, + "Ġantagonist": 34667, + "ĠYesterday": 34668, + "Ġfuzzy": 34669, + "wash": 34670, + "mere": 34671, + "Ġbewild": 34672, + "ĠMae": 34673, + "Virgin": 34674, + "phrase": 34675, + "Ġsignaled": 34676, + "ĠHIGH": 34677, + "Ġprotester": 34678, + "Ġgarner": 34679, + "unknown": 34680, + "Ġkay": 34681, + "Ġabducted": 34682, + "Ġstalking": 34683, + "amn": 34684, + "Ġdeserving": 34685, + "ĠRiv": 34686, + "ĠJorge": 34687, + "Ġscratching": 34688, + "ĠSaving": 34689, + "iping": 34690, + "Ġtease": 34691, + "Ġmissionary": 34692, + "ĠMorrow": 34693, + "TIME": 34694, + "Present": 34695, + "Ġchemotherapy": 34696, + "terness": 34697, + "ĠHomes": 34698, + "ĠPurdue": 34699, + "Ġstaunch": 34700, + "ĠWhitney": 34701, + "ĠTHERE": 34702, + "μ": 34703, + "iatus": 34704, + "ĠErnest": 34705, + "ĠDeploy": 34706, + "Ġcoveted": 34707, + "FML": 34708, + "ĠDialogue": 34709, + "Ġexited": 34710, + "fruit": 34711, + "Ġnerd": 34712, + "\":\"\",\"": 34713, + "Ġvivo": 34714, + "ruly": 34715, + "460": 34716, + "ĠAmen": 34717, + "rehensible": 34718, + "Ġâĺ": 34719, + "DIR": 34720, + "Ġadherence": 34721, + "Ġchew": 34722, + "ĠCoke": 34723, + "ĠSergei": 34724, + "digital": 34725, + "ĠNeck": 34726, + "gently": 34727, + "enthal": 34728, + "/)": 34729, + "Ġweary": 34730, + "Ġguise": 34731, + "ĠConcord": 34732, + "ĠOnion": 34733, + "atcher": 34734, + "Ġbinge": 34735, + "ĠDirective": 34736, + "Ġmanned": 34737, + "ansk": 34738, + "Ġillusions": 34739, + "Ġbillionaires": 34740, + "383": 34741, + "olyn": 34742, + "odynamic": 34743, + "ĠWheat": 34744, + "ĠAlic": 34745, + "Ġcoloured": 34746, + "ĠNAFTA": 34747, + "abo": 34748, + "Ġmacros": 34749, + "independent": 34750, + "sweet": 34751, + "Ġspac": 34752, + "ĠKabul": 34753, + "ĠÄ": 34754, + "eme": 34755, + "Ġdictated": 34756, + "Ġshouts": 34757, + "={": 34758, + "Ġripping": 34759, + "ĠShay": 34760, + "ĠCricket": 34761, + "directed": 34762, + "Ġanalysed": 34763, + "ĠWARRANT": 34764, + "agons": 34765, + "ĠBlazers": 34766, + "Ġcheered": 34767, + "Ġarithmetic": 34768, + "ĠTanz": 34769, + "373": 34770, + "ĠFlags": 34771, + "Ġ295": 34772, + "Ġwitches": 34773, + "ĠIncluded": 34774, + "ĠGained": 34775, + "ĠBlades": 34776, + "Gam": 34777, + "ĠSamantha": 34778, + "ĠAtlantis": 34779, + "ĠPratt": 34780, + "Ġspoiled": 34781, + "ĠIB": 34782, + "ĠRamirez": 34783, + "Probably": 34784, + "rero": 34785, + "ĠNg": 34786, + "ĠWarlock": 34787, + "tp": 34788, + "Ġoverhe": 34789, + "Ġadministrations": 34790, + "Ġtint": 34791, + "Ġregiment": 34792, + "Ġpistols": 34793, + "Ġblankets": 34794, + "Ġepist": 34795, + "Ġbowls": 34796, + "Ġhydraulic": 34797, + "Ġdean": 34798, + "Ġjung": 34799, + "Ġascend": 34800, + "705": 34801, + "ĠSantiago": 34802, + "î": 34803, + "Ġunavoid": 34804, + "ĠShaman": 34805, + "reb": 
34806, + "Ġstemming": 34807, + "998": 34808, + "ĠMG": 34809, + "sticks": 34810, + "esthesia": 34811, + "ERO": 34812, + "Ġmorbid": 34813, + "ĠGrill": 34814, + "ĠPoe": 34815, + "anyl": 34816, + "Ġdeleting": 34817, + "ĠSurveillance": 34818, + "Ġdirectives": 34819, + "Ġiterations": 34820, + "ĠRox": 34821, + "ĠMilky": 34822, + "Father": 34823, + "Ġpatented": 34824, + "447": 34825, + "Ġprecursor": 34826, + "Ġmaiden": 34827, + "ĠPhen": 34828, + "ĠVegan": 34829, + "ĠPatent": 34830, + "Kelly": 34831, + "Redditor": 34832, + "Ġnods": 34833, + "Ġventilation": 34834, + "ĠSchwarz": 34835, + "Ġwizards": 34836, + "Ġominous": 34837, + "ĠHeads": 34838, + "ĠBG": 34839, + "Ġlumber": 34840, + "ĠSpiel": 34841, + "ĠisEnabled": 34842, + "Ġancestral": 34843, + "ĠShips": 34844, + "Ġwrestler": 34845, + "phi": 34846, + "Ġyuan": 34847, + "ĠRebellion": 34848, + "Ġiceberg": 34849, + "Ġmagically": 34850, + "Ġdiversion": 34851, + "arro": 34852, + "ythm": 34853, + "ĠRiders": 34854, + "ĠRobbie": 34855, + "ĠKara": 34856, + "ĠMaintenance": 34857, + "ĠHerb": 34858, + "Ġharms": 34859, + "packed": 34860, + "ĠFeinstein": 34861, + "Ġmarrying": 34862, + "Ġblending": 34863, + "ĠRates": 34864, + "Ġ1880": 34865, + "Ġwrink": 34866, + "ĠUnch": 34867, + "ĠTorch": 34868, + "described": 34869, + "Ġhumanoid": 34870, + "ilitating": 34871, + "ĠConv": 34872, + "ĠFeld": 34873, + "IGHTS": 34874, + "Ġwhistleblower": 34875, + "ortmund": 34876, + "etsy": 34877, + "arrett": 34878, + "ĠMono": 34879, + "ĠIke": 34880, + "ĠCNBC": 34881, + "ĠWAY": 34882, + "ĠMDMA": 34883, + "ĠIndividuals": 34884, + "Ġsupplemental": 34885, + "Ġpowerhouse": 34886, + "ĠStru": 34887, + "Focus": 34888, + "aphael": 34889, + "ĠColleg": 34890, + "atti": 34891, + "ZA": 34892, + "Ġperenn": 34893, + "ĠSignature": 34894, + "ĠRodney": 34895, + "Ġcubes": 34896, + "iddled": 34897, + "ĠDante": 34898, + "ĠINV": 34899, + "ilingual": 34900, + "ĠCth": 34901, + "Ġsofa": 34902, + "Ġintimidate": 34903, + "ĠRoe": 34904, + "ĠDiplom": 34905, + "ĠCountries": 34906, + "ayson": 34907, + "Ġextradition": 34908, + "Ġdisabling": 34909, + "ĠCardiff": 34910, + "Ġmemorandum": 34911, + "ĠTrace": 34912, + "Ġ???": 34913, + "sector": 34914, + "ĠRouhani": 34915, + "ĠYates": 34916, + "ĠFreeze": 34917, + "Ġbladder": 34918, + "Motor": 34919, + "ĠPromise": 34920, + "antasy": 34921, + "Ġforeseeable": 34922, + "ĠCologne": 34923, + "container": 34924, + "ĠTrees": 34925, + "ĠGors": 34926, + "ĠSinclair": 34927, + "Ġbarring": 34928, + "keye": 34929, + "Ġslashed": 34930, + "ĠStatistical": 34931, + "éĩ": 34932, + "Ġâĸº": 34933, + "Allows": 34934, + "Ġhumility": 34935, + "Ġdrilled": 34936, + "ĠFurn": 34937, + "443": 34938, + "Ġsewage": 34939, + "Ġhomepage": 34940, + "Ġcourtyard": 34941, + "Ġvile": 34942, + "Ġsubsidiaries": 34943, + "ajo": 34944, + "directory": 34945, + "Ġammon": 34946, + "Vers": 34947, + "charges": 34948, + "Ġ}}": 34949, + "ĠChains": 34950, + "Ġ246": 34951, + "nob": 34952, + "Ġpercept": 34953, + "Ġgrit": 34954, + "Ġfishermen": 34955, + "ĠIraqis": 34956, + "ĠDISTR": 34957, + "ĠFULL": 34958, + "ĠEvaluation": 34959, + "graph": 34960, + "atial": 34961, + "Ġcooperating": 34962, + "Ġmelan": 34963, + "Ġenlightened": 34964, + "Ġali": 34965, + "tailed": 34966, + "Ġsalute": 34967, + "Ġweakest": 34968, + "ĠBulldogs": 34969, + "UA": 34970, + "ĠAlloy": 34971, + "Ġsemen": 34972, + "ocene": 34973, + "ĠWilliamson": 34974, + "spr": 34975, + ",âĢĶ": 34976, + "ĠGF": 34977, + "ittens": 34978, + "Beat": 34979, + "ĠJunk": 34980, + "iphate": 34981, + "ĠFarmers": 34982, + "ĠBitcoins": 34983, + "igers": 34984, + "dh": 34985, + 
"ĠLoyal": 34986, + "payer": 34987, + "Ġentertained": 34988, + "Ġpenned": 34989, + "Ġcoupon": 34990, + "Queue": 34991, + "Ġweakening": 34992, + "carry": 34993, + "Ġunderestimate": 34994, + "Ġshootout": 34995, + "Ġcharismatic": 34996, + "ĠProcedure": 34997, + "Ġprudent": 34998, + "inances": 34999, + "Ġriches": 35000, + "Ġcortical": 35001, + "Ġstrides": 35002, + "Ġdrib": 35003, + "ĠOilers": 35004, + "540": 35005, + "ĠPerform": 35006, + "ĠBangkok": 35007, + "Ġeuth": 35008, + "SER": 35009, + "Ġsimplistic": 35010, + "tops": 35011, + "campaign": 35012, + "Quality": 35013, + "Ġimpoverished": 35014, + "ĠEisenhower": 35015, + "Ġaugment": 35016, + "ĠHarden": 35017, + "Ġintervened": 35018, + "Ġlistens": 35019, + "ĠKok": 35020, + "Ġsage": 35021, + "Ġrubbish": 35022, + "ĠDed": 35023, + "Ġmull": 35024, + "pelling": 35025, + "Ġvideot": 35026, + "Production": 35027, + "DJ": 35028, + "miah": 35029, + "Ġadaptations": 35030, + "Ġmedically": 35031, + "Ġboarded": 35032, + "Ġarrogance": 35033, + "Ġscrapped": 35034, + "Ġoppress": 35035, + "FORMATION": 35036, + "Ġjunction": 35037, + "415": 35038, + "EEEE": 35039, + "Skill": 35040, + "Ġsubdu": 35041, + "ĠSuggest": 35042, + "ĠPett": 35043, + "Ġlett": 35044, + "ĠManip": 35045, + "ĠCaf": 35046, + "ĠCooperation": 35047, + "Ther": 35048, + "Ġregained": 35049, + "¶æ": 35050, + "reflect": 35051, + "Ġthugs": 35052, + "ĠShelby": 35053, + "Ġdictates": 35054, + "ĠWeiner": 35055, + "ĠHale": 35056, + "Ġbattleground": 35057, + "schild": 35058, + "Ġcondol": 35059, + "hunt": 35060, + "ositories": 35061, + "Ġaccuses": 35062, + "Filename": 35063, + "Ġshri": 35064, + "Ġmotivate": 35065, + "Ġreflections": 35066, + "Null": 35067, + "ĠLobby": 35068, + "¥µ": 35069, + "ĠSATA": 35070, + "ĠBackup": 35071, + "Ñĥ": 35072, + "nin": 35073, + "ĠCorrection": 35074, + "Ġjuicy": 35075, + "utra": 35076, + "ĠPric": 35077, + "Ġrestraining": 35078, + "ĠAirbnb": 35079, + "ĠArrest": 35080, + "Ġappropriations": 35081, + "Ġslopes": 35082, + "Ġmanslaughter": 35083, + "Ġworkings": 35084, + "ĠHuss": 35085, + "ĠFrey": 35086, + "Leave": 35087, + "ĠHarmony": 35088, + "ĠFeder": 35089, + "Ġ430": 35090, + "Ġtrench": 35091, + "Ġgladly": 35092, + "Ġbullpen": 35093, + "ĠGau": 35094, + "bones": 35095, + "Ġgroove": 35096, + "Ġpretext": 35097, + "ãħĭ": 35098, + "Ġtransmitter": 35099, + "ĠComponent": 35100, + "Ġunderage": 35101, + "ĠEmpires": 35102, + "Tile": 35103, + "Ġoy": 35104, + "ĠMarvin": 35105, + "ĠCAS": 35106, + "Ġbloss": 35107, + "Ġreplicated": 35108, + "ĠMariners": 35109, + "Marcus": 35110, + "ĠBlocks": 35111, + "Ġliberated": 35112, + "Ġbutterfly": 35113, + "Feel": 35114, + "Ġfermentation": 35115, + "Ġyoutube": 35116, + "Ġoffend": 35117, + "ĠTerm": 35118, + "resist": 35119, + "Ġcessation": 35120, + "Ġinsurgency": 35121, + "Ġbir": 35122, + "ĠRaise": 35123, + "595": 35124, + "Ġhypotheses": 35125, + "502": 35126, + "Ġplaque": 35127, + "ocrat": 35128, + "Ġjackets": 35129, + "ĠHuffPost": 35130, + "among": 35131, + "Ġconfer": 35132, + "487": 35133, + "ĠLilly": 35134, + "Ġadapting": 35135, + "ĠFay": 35136, + "Ġshoved": 35137, + "vec": 35138, + "Ġrefine": 35139, + "Ġgon": 35140, + "Ġgunmen": 35141, + "zai": 35142, + "ĠShuttle": 35143, + "ĠIzan": 35144, + "Ġ1913": 35145, + "Ġplethora": 35146, + "··": 35147, + "Ġ510": 35148, + "Ġpuberty": 35149, + "Ġ241": 35150, + "ĠWealth": 35151, + "ĠAlma": 35152, + "ĠMEM": 35153, + "ĠAdults": 35154, + "Cas": 35155, + "prison": 35156, + "Race": 35157, + "Ġwaterproof": 35158, + "Ġathleticism": 35159, + "Ġcapitalize": 35160, + "ĠJuice": 35161, + "Ġilluminated": 35162, + "ĠPascal": 35163, 
+ "Ġirritation": 35164, + "ĠWitnesses": 35165, + "adle": 35166, + "ĠAstro": 35167, + "Ġfax": 35168, + "ĠElvis": 35169, + "Primary": 35170, + "ĠLich": 35171, + "ĠElves": 35172, + "Ġresiding": 35173, + "Ġstumble": 35174, + "319": 35175, + "ĠPKK": 35176, + "Ġadversaries": 35177, + "DOS": 35178, + "ĠRitual": 35179, + "Ġsmear": 35180, + "Ġarson": 35181, + "idental": 35182, + "Ġscant": 35183, + "Ġmonarchy": 35184, + "Ġhalftime": 35185, + "Ġresidue": 35186, + "Ġindign": 35187, + "ĠShaun": 35188, + "ĠElm": 35189, + "auri": 35190, + "Aff": 35191, + "WATCH": 35192, + "ĠLyon": 35193, + "helps": 35194, + "361": 35195, + "Ġlobbyist": 35196, + "Ġdiminishing": 35197, + "Ġoutbreaks": 35198, + "Ġgoats": 35199, + "favorite": 35200, + "ĠNah": 35201, + "sonian": 35202, + "ĠBooster": 35203, + "Ġsandbox": 35204, + "ĠFare": 35205, + "ĠMalta": 35206, + "ĠattRot": 35207, + "ĠMOR": 35208, + "lde": 35209, + "Ġnavigating": 35210, + "Touch": 35211, + "Ġuntrue": 35212, + "ĠDisaster": 35213, + "Ġludicrous": 35214, + "Password": 35215, + "ĠJFK": 35216, + "blogspot": 35217, + "416": 35218, + "ĠUNDER": 35219, + "ernal": 35220, + "Ġdelaying": 35221, + "TOP": 35222, + "Ġimplants": 35223, + "ĠAVG": 35224, + "ĠHuge": 35225, + "attr": 35226, + "Ġjournalistic": 35227, + "ĠPeyton": 35228, + "ĠIA": 35229, + "Rap": 35230, + "goal": 35231, + "ĠProgramme": 35232, + "Ġsmashing": 35233, + "wives": 35234, + "println": 35235, + "ĠPlague": 35236, + "inus": 35237, + "EEP": 35238, + "Ġcruiser": 35239, + "ĠParish": 35240, + "uminium": 35241, + "Ġoccupants": 35242, + "ĠJihad": 35243, + "mop": 35244, + "Ġpint": 35245, + "Ġhect": 35246, + "ĠMecca": 35247, + "director": 35248, + "ĠFunding": 35249, + "ĠMixed": 35250, + "Ġstag": 35251, + "Tier": 35252, + "Ġgust": 35253, + "Ġbrightly": 35254, + "orsi": 35255, + "Ġuphill": 35256, + "RD": 35257, + "Ġlesions": 35258, + "ĠBundy": 35259, + "livious": 35260, + "Ġbiologist": 35261, + "ĠFaculty": 35262, + "ĠAuthorization": 35263, + "Ġ244": 35264, + "Allow": 35265, + "ï¸": 35266, + "ĠGiul": 35267, + "Ġpertinent": 35268, + "otaur": 35269, + "esse": 35270, + "ĠRoof": 35271, + "Ġunmanned": 35272, + "351": 35273, + "ĠShak": 35274, + "ĠOrient": 35275, + "Ġendanger": 35276, + "Dir": 35277, + "Ġreplen": 35278, + "edient": 35279, + "Ġtailor": 35280, + "Ġgadgets": 35281, + "Ġaudible": 35282, + "âĺĨ": 35283, + "Nice": 35284, + "Ġbombard": 35285, + "ĠRape": 35286, + "Ġdefiance": 35287, + "ĠTWO": 35288, + "ĠFilipino": 35289, + "Ġunaffected": 35290, + "ervatives": 35291, + "Ġsoared": 35292, + "ĠBolton": 35293, + "Ġcompromising": 35294, + "ĠBrewers": 35295, + "RAL": 35296, + "ĠAHL": 35297, + "icycle": 35298, + "Ġvampires": 35299, + "Ġdipped": 35300, + "oyer": 35301, + "ĠXIII": 35302, + "Ġsideways": 35303, + "ĠWaste": 35304, + "ĠDiss": 35305, + "ĠâĶľâĶĢâĶĢ": 35306, + "$.": 35307, + "Ġhabitats": 35308, + "ĠBeef": 35309, + "truth": 35310, + "trained": 35311, + "split": 35312, + "Rus": 35313, + "Andy": 35314, + "ĠBram": 35315, + "REP": 35316, + "pid": 35317, + "è£ħ": 35318, + "ĠMutant": 35319, + "Anim": 35320, + "ĠMarina": 35321, + "Ġfutile": 35322, + "highest": 35323, + "frequency": 35324, + "Ġepilepsy": 35325, + "Ġcoping": 35326, + "Ġconcise": 35327, + "Ġtracing": 35328, + "ĠSUN": 35329, + "panel": 35330, + "ĠSophie": 35331, + "ĠCrowley": 35332, + "ĠAdolf": 35333, + "ĠShooter": 35334, + "Ġshaky": 35335, + "ĠIG": 35336, + "ĠLies": 35337, + "ĠBarber": 35338, + "pkg": 35339, + "Ġuptake": 35340, + "Ġpredatory": 35341, + "ULTS": 35342, + "/**": 35343, + "Ġintoxicated": 35344, + "ĠWestbrook": 35345, + "odder": 35346, + "hement": 
35347, + "Ġbaseman": 35348, + "APD": 35349, + "storage": 35350, + "ĠFifty": 35351, + "editor": 35352, + "GEN": 35353, + "UTION": 35354, + "irting": 35355, + "Ġsewing": 35356, + "rift": 35357, + "Ġagony": 35358, + "ĠSands": 35359, + "Ġ254": 35360, + "Cash": 35361, + "Ġlodge": 35362, + "Ġpunt": 35363, + "Natural": 35364, + "ĠIdeas": 35365, + "Ġerroneous": 35366, + "ĠSensor": 35367, + "ĠHannity": 35368, + "Ġ1921": 35369, + "Ġmould": 35370, + "ĠGon": 35371, + "kaya": 35372, + "Ġanonymously": 35373, + "ĠKEY": 35374, + "Ġsimulator": 35375, + "Winter": 35376, + "Ġstreamed": 35377, + "507": 35378, + "?\",": 35379, + "Ġteased": 35380, + "Ġcoefficient": 35381, + "Ġwartime": 35382, + "ĠTHR": 35383, + "''.": 35384, + "ĠBanking": 35385, + "mpire": 35386, + "Ġfandom": 35387, + "Ġlia": 35388, + "Ga": 35389, + "Ġdownhill": 35390, + "Ġinterpreting": 35391, + "Individual": 35392, + "Norm": 35393, + "Ġjealousy": 35394, + "bitcoin": 35395, + "Ġpleasures": 35396, + "ĠToys": 35397, + "ĠChevrolet": 35398, + "ĠAdvisor": 35399, + "IZE": 35400, + "Ġreceptions": 35401, + "706": 35402, + "Cro": 35403, + "Ġ262": 35404, + "Ġcitrus": 35405, + "iru": 35406, + "Reviewer": 35407, + "jected": 35408, + "UES": 35409, + "anz": 35410, + "1981": 35411, + "ĠWorker": 35412, + "Ġcomplied": 35413, + "orescent": 35414, + "continental": 35415, + "Ton": 35416, + "ĠPrism": 35417, + "ĠSheep": 35418, + "Ġ288": 35419, + "nox": 35420, + "ĠVog": 35421, + "Ord": 35422, + "Ġrealms": 35423, + "tek": 35424, + "Ġirrigation": 35425, + "Ġbicycles": 35426, + "Ġelectronically": 35427, + "poly": 35428, + "tall": 35429, + "());": 35430, + "Ġaesthetics": 35431, + "ĠIntegrated": 35432, + "Explore": 35433, + "Ġdunk": 35434, + "476": 35435, + "pain": 35436, + "ĠJacques": 35437, + "ĠDmit": 35438, + "Frames": 35439, + "Ġreunited": 35440, + "Ġhumid": 35441, + "Dro": 35442, + "Political": 35443, + "Ġyouthful": 35444, + "Ġentails": 35445, + "Ġmosquito": 35446, + "363": 35447, + "species": 35448, + "Ġcoordinating": 35449, + "ĠMayhem": 35450, + "ĠMagnus": 35451, + "Mount": 35452, + "Improved": 35453, + "ĠSTATE": 35454, + "ATTLE": 35455, + "Ġflowed": 35456, + "Ġtackled": 35457, + "Ġfashioned": 35458, + "Ġreorgan": 35459, + "ivari": 35460, + "finger": 35461, + "Ġreluctantly": 35462, + "etting": 35463, + "ĠVand": 35464, + "young": 35465, + "ĠGarland": 35466, + "Ġpresumption": 35467, + "Ġamenities": 35468, + "ĠPleasant": 35469, + "onential": 35470, + "ĠOxy": 35471, + "Ġmorals": 35472, + "ĠYah": 35473, + "Ready": 35474, + "Simon": 35475, + "Enh": 35476, + "Demon": 35477, + "Ġclich": 35478, + "Monitor": 35479, + "ĠDU": 35480, + "Ġwelcomes": 35481, + "Ġstandout": 35482, + "Ġdreadful": 35483, + "Ġbananas": 35484, + "Ġballoons": 35485, + "hooting": 35486, + "basic": 35487, + "Ġsuffix": 35488, + "Ġduly": 35489, + "cano": 35490, + "Chain": 35491, + "atos": 35492, + "Ġgeopolitical": 35493, + "Ġ(&": 35494, + "ĠGemini": 35495, + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ": 35496, + "Ġacquitted": 35497, + "Luck": 35498, + "protect": 35499, + "1024": 35500, + "Ġscarcity": 35501, + "Ġmindfulness": 35502, + "ecided": 35503, + "DN": 35504, + "prime": 35505, + "ĠPresidents": 35506, + "ĠVIDEO": 35507, + "Ġ(âĪĴ": 35508, + "addock": 35509, + "NOR": 35510, + "ĠPru": 35511, + "pun": 35512, + "ĠLOL": 35513, + "))))": 35514, + "ĠLiqu": 35515, + "ĠSAS": 35516, + "Ġstyling": 35517, + "Ġpunishments": 35518, + "Ġnumb": 35519, + "Ġascertain": 35520, + "ĠRockies": 35521, + "flu": 35522, + "Thumbnail": 35523, + 
"Ġperpetrated": 35524, + "ĠSemi": 35525, + "Ġdisarm": 35526, + "ĠOlder": 35527, + "ĠException": 35528, + "Ġexponentially": 35529, + "ĠCommunities": 35530, + "Ġabolish": 35531, + "ĠPartner": 35532, + "ptoms": 35533, + "Ġ777": 35534, + "ĠFoley": 35535, + "ĠCases": 35536, + "Ġgrease": 35537, + "ĠRebirth": 35538, + "Ground": 35539, + "Ġ;)": 35540, + "ĠDoctrine": 35541, + "ikini": 35542, + "Ye": 35543, + "ĠBlossom": 35544, + "Ġpersists": 35545, + "bill": 35546, + "Ġinfusion": 35547, + "Ġbuddies": 35548, + "911": 35549, + "ĠPatient": 35550, + "Ġdemos": 35551, + "Ġacquaintance": 35552, + "ĠPaw": 35553, + "atari": 35554, + "Ġxml": 35555, + "Ġfascination": 35556, + "ĠServe": 35557, + "ÏĤ": 35558, + "branded": 35559, + "Ġaz": 35560, + "Returns": 35561, + "Ġovershadow": 35562, + "Ġroam": 35563, + "Ġspeedy": 35564, + "numbered": 35565, + "helial": 35566, + "Ġdisciple": 35567, + "Ġassurances": 35568, + "given": 35569, + "pecting": 35570, + "ĠNatalie": 35571, + "çĶ°": 35572, + "Ġmosquitoes": 35573, + "rotein": 35574, + "Ġnumeric": 35575, + "Ġindependents": 35576, + "Ġtransitional": 35577, + "Ġreactionary": 35578, + "ĠMechdragon": 35579, + "doctor": 35580, + "Ġshortest": 35581, + "Ġsequential": 35582, + "ĠBac": 35583, + "ĠAccounts": 35584, + "ãģĮ": 35585, + "achy": 35586, + "ractive": 35587, + "ĠRegiment": 35588, + "Ġbreathtaking": 35589, + "fficiency": 35590, + "ĠBates": 35591, + "Ġ311": 35592, + "Ġwardrobe": 35593, + "fts": 35594, + "ĠBerk": 35595, + "Simply": 35596, + "ĠRiverside": 35597, + "ivering": 35598, + "idential": 35599, + "lucent": 35600, + "Ġenriched": 35601, + "ĠConver": 35602, + "ĠGiving": 35603, + "ãĥĻ": 35604, + "Ġlegalize": 35605, + "ĠFTC": 35606, + "Ġfreaking": 35607, + "Mix": 35608, + "Ġterrestrial": 35609, + "esian": 35610, + "cients": 35611, + "Wing": 35612, + "LOAD": 35613, + "Ġledge": 35614, + "ĠViolent": 35615, + "ĠMetall": 35616, + "Ġ308": 35617, + "Ġsoutheastern": 35618, + "hetto": 35619, + "Meat": 35620, + "Ġslowdown": 35621, + "Ġretreated": 35622, + "Jeremy": 35623, + "endas": 35624, + "*****": 35625, + "eric": 35626, + "Ġreins": 35627, + "oppable": 35628, + "ĠHumanity": 35629, + "earances": 35630, + "rigan": 35631, + "Camera": 35632, + "Ġwaivers": 35633, + "soc": 35634, + "Ġalteration": 35635, + "transform": 35636, + "ĠCemetery": 35637, + "506": 35638, + "Ġindefinite": 35639, + "Ġstimulating": 35640, + "yg": 35641, + "603": 35642, + "ĠSop": 35643, + "Ġdescriptive": 35644, + "Phase": 35645, + "ĠEdmund": 35646, + "Ġpneumonia": 35647, + "ventus": 35648, + "Amb": 35649, + "Ġlaboratories": 35650, + "ĠExclusive": 35651, + "ugar": 35652, + "Were": 35653, + "Ġmalfunction": 35654, + "Ġhomosexuals": 35655, + "Ġ-------": 35656, + "uni": 35657, + "Ġturbines": 35658, + "ĠEquity": 35659, + "Du": 35660, + "Ġminded": 35661, + "ĠRH": 35662, + "ĠBlackhawks": 35663, + "Ġfeats": 35664, + "Ġ1700": 35665, + "repl": 35666, + "362": 35667, + "laden": 35668, + "Ġindispensable": 35669, + "lyss": 35670, + "tti": 35671, + "Ġreel": 35672, + "Ġdiverted": 35673, + "Ġlikeness": 35674, + "Ġsubscriptions": 35675, + "Ġfingert": 35676, + "Ġfilthy": 35677, + "destruct": 35678, + "draft": 35679, + "ĠBernardino": 35680, + "launch": 35681, + "Ġperplex": 35682, + "ĠSUM": 35683, + "carb": 35684, + "Ġsweater": 35685, + "ĠVenture": 35686, + "ĠJag": 35687, + "ĠCeleb": 35688, + "ĠVoters": 35689, + "Ġsteadfast": 35690, + "Ġathletics": 35691, + "ĠHanson": 35692, + "ĠDrac": 35693, + "Tracker": 35694, + "Ġcommend": 35695, + "ĠPresidency": 35696, + "ĠDID": 35697, + "informed": 35698, + "Ġwebpage": 35699, + "Pretty": 35700, + 
"Ġforcefully": 35701, + "ãĥĥãĤ¯": 35702, + "Ġrelocation": 35703, + "Ġsatire": 35704, + "âī": 35705, + "ĠSunderland": 35706, + "æĦ": 35707, + "Voice": 35708, + "????????": 35709, + "Ġinformant": 35710, + "Ġbowel": 35711, + "ĠUniform": 35712, + "Ġ...\"": 35713, + "Ġpurge": 35714, + "Ġpicnic": 35715, + "ĠUmb": 35716, + "ĠUPDATE": 35717, + "ĠSapphire": 35718, + "ĠStall": 35719, + "learn": 35720, + "Ġobjectively": 35721, + "Ġobliter": 35722, + "Ġloophole": 35723, + "Ġjourneys": 35724, + "Ġomission": 35725, + "Pros": 35726, + "ĠSidney": 35727, + "ploma": 35728, + "Ġsprayed": 35729, + "Ġguru": 35730, + "Ġtraitor": 35731, + "Ġtimet": 35732, + "Ġsnapping": 35733, + "ĠSevent": 35734, + "urnal": 35735, + "ĠUkip": 35736, + "Ġbowed": 35737, + "poral": 35738, + "liberal": 35739, + "Ros": 35740, + "Questions": 35741, + "iOS": 35742, + "Ġsummarize": 35743, + "STAT": 35744, + "Ġ1850": 35745, + "apest": 35746, + "Ġlender": 35747, + "ĠVariable": 35748, + "bringing": 35749, + "ĠLORD": 35750, + ",)": 35751, + "Ġcollapses": 35752, + "xiety": 35753, + "ĠNed": 35754, + "YD": 35755, + "ĠScha": 35756, + "Ġantibody": 35757, + "Ġdisband": 35758, + "yre": 35759, + "illusion": 35760, + "Ġrover": 35761, + "shed": 35762, + "ĠHirosh": 35763, + "cci": 35764, + "Ġcalam": 35765, + "ĠMorton": 35766, + "Pinterest": 35767, + "Ġ1928": 35768, + "ĠEuras": 35769, + "ordes": 35770, + "Ġfences": 35771, + "ĠInventory": 35772, + "ĠValencia": 35773, + "ĠUd": 35774, + "ĠTiff": 35775, + "Ġsque": 35776, + "Ġquotation": 35777, + "Ġtroublesome": 35778, + "erker": 35779, + "QUEST": 35780, + "ĠKingdoms": 35781, + "south": 35782, + "Ġlevy": 35783, + "Prince": 35784, + "ĠSting": 35785, + "Ġnicknamed": 35786, + "Ġappe": 35787, + "Ġphotographic": 35788, + "Ġcorpus": 35789, + "reference": 35790, + "ĠTrog": 35791, + "Unt": 35792, + ")=(": 35793, + "ĠLatvia": 35794, + "Ġactivating": 35795, + "Ġlicensee": 35796, + "Ġdisparities": 35797, + "ĠNewsletter": 35798, + "ãĥĥãĥĪ": 35799, + "Ġfreeing": 35800, + "ĠJeep": 35801, + "ĠPerception": 35802, + "insk": 35803, + "Ġsilicone": 35804, + "ĠHayden": 35805, + "Lean": 35806, + "ĠSuzuki": 35807, + "ibrarian": 35808, + "668": 35809, + "Ġspor": 35810, + "Ġcorrelations": 35811, + "aghetti": 35812, + "Ġtuber": 35813, + "ĠIPCC": 35814, + "ilus": 35815, + "ĠVu": 35816, + "Ġwealthiest": 35817, + "ĠCarbuncle": 35818, + "anza": 35819, + "Ġfooled": 35820, + "ĠZur": 35821, + "Ġdaddy": 35822, + "rano": 35823, + "ilian": 35824, + "Ġknockout": 35825, + "fman": 35826, + "required": 35827, + "ĠWikileaks": 35828, + "ĠDuffy": 35829, + "ONT": 35830, + "Ġinsol": 35831, + "ĠObjects": 35832, + "Ġbou": 35833, + "ĠNordic": 35834, + "ĠInsert": 35835, + "scan": 35836, + "Ġdancers": 35837, + "Ġidiots": 35838, + "majority": 35839, + "ĠNeville": 35840, + "ĠFreeBSD": 35841, + "Ġtart": 35842, + "panic": 35843, + "690": 35844, + "Ġcocoa": 35845, + "Ġsampled": 35846, + "Ġlookup": 35847, + "Indust": 35848, + "Ġinjections": 35849, + "genre": 35850, + "Ġau": 35851, + "Ġroadway": 35852, + "Ġgenitals": 35853, + "Kind": 35854, + "ĠExaminer": 35855, + "ĠYaz": 35856, + "Fresh": 35857, + "Ġparalysis": 35858, + "ĠAluminum": 35859, + "Ġreap": 35860, + "oké": 35861, + "Ġsloppy": 35862, + "ĠTunnel": 35863, + "posium": 35864, + "nery": 35865, + "enic": 35866, + "Ġherbal": 35867, + "ĠOuter": 35868, + "ĠBuilder": 35869, + "Ġincur": 35870, + "Ġideologies": 35871, + "Ġbackups": 35872, + "consuming": 35873, + "ĠDetect": 35874, + "deck": 35875, + "ĠKNOW": 35876, + "ĠGret": 35877, + "ĠMIC": 35878, + "Ġtoughness": 35879, + "ĠExhibit": 35880, + "Ġhive": 35881, + 
"Les": 35882, + "ĠSCHOOL": 35883, + "ĠAtari": 35884, + "alde": 35885, + "ĠNull": 35886, + "andestine": 35887, + "mouse": 35888, + "Ġbrigade": 35889, + "489": 35890, + "Ġrevol": 35891, + "ĠLawson": 35892, + "ĠWah": 35893, + "opoly": 35894, + "ebted": 35895, + "ĠSaunders": 35896, + "Ġ313": 35897, + "ĠWinc": 35898, + "Ġtaboo": 35899, + "ĠHelmet": 35900, + "Ġwedge": 35901, + "chip": 35902, + "ĠTina": 35903, + "bg": 35904, + "Ġinfuri": 35905, + "rn": 35906, + "Ġanomalies": 35907, + "ĠSync": 35908, + "ĠExam": 35909, + "ĠCommit": 35910, + "ĠDiary": 35911, + "ĠALSO": 35912, + "ĠDebor": 35913, + "omedical": 35914, + "Ġcomprehension": 35915, + "655": 35916, + "Ġempowering": 35917, + "Ġire": 35918, + "Ġjuices": 35919, + "ĠETH": 35920, + "ĠBoxing": 35921, + "=\"/": 35922, + "Ġfacilitated": 35923, + "poke": 35924, + "ĠParsons": 35925, + "ĠModer": 35926, + "travel": 35927, + "Ġcivilizations": 35928, + "Ġlibertarians": 35929, + "Ġrune": 35930, + "ĠClarks": 35931, + "athed": 35932, + "Ġcampaigners": 35933, + "ĠDispatch": 35934, + "ĠFahrenheit": 35935, + "ĠCapcom": 35936, + "----------": 35937, + "Ġlace": 35938, + "Ġdraining": 35939, + "Ġliner": 35940, + "ĠArtificial": 35941, + "én": 35942, + "task": 35943, + "]).": 35944, + "ĠGMO": 35945, + "ĠOperator": 35946, + "ordinary": 35947, + "ĠInfluence": 35948, + "ĠUps": 35949, + "Ġpotency": 35950, + "ussen": 35951, + "ospons": 35952, + "ĠSwim": 35953, + "ĠDeadline": 35954, + "Unity": 35955, + "Ġculinary": 35956, + "Ġenlightenment": 35957, + "Ġwearer": 35958, + "Ġmined": 35959, + "Ġply": 35960, + "Ġincest": 35961, + "ĠDVDs": 35962, + "Walk": 35963, + "BTC": 35964, + "Trade": 35965, + "Ġdeval": 35966, + "iband": 35967, + "ĠOversight": 35968, + "Palestinian": 35969, + "Ġdart": 35970, + "Ġmul": 35971, + "LR": 35972, + "Ġremovable": 35973, + "ĠRealms": 35974, + "ìĿ": 35975, + "Ġmiscar": 35976, + "ĠVulkan": 35977, + "685": 35978, + "ère": 35979, + "ĠSap": 35980, + "Ġmerging": 35981, + "ĠCarly": 35982, + "chester": 35983, + "Ġbrisk": 35984, + "Ġluxurious": 35985, + "ĠGenerator": 35986, + "Ġbitterness": 35987, + "Ġedible": 35988, + "Ġ243": 35989, + "TG": 35990, + "Ġrectangle": 35991, + "WithNo": 35992, + "below": 35993, + "Jenn": 35994, + "Ġdarkest": 35995, + "Ġhitch": 35996, + "Ġdosage": 35997, + "Ġscaven": 35998, + "ĠKeller": 35999, + "ĠIllustrated": 36000, + "Certainly": 36001, + "ĠMavericks": 36002, + "Marginal": 36003, + "Ġdiarrhea": 36004, + "Ġenormously": 36005, + "Ġ999": 36006, + "shr": 36007, + "quart": 36008, + "Ġadamant": 36009, + "ĠMew": 36010, + "Ġrenovation": 36011, + "Ġcervical": 36012, + "ĠPercentage": 36013, + "eners": 36014, + "ĠKimber": 36015, + "Ġfloats": 36016, + "Ġdex": 36017, + "ĠWitcher": 36018, + "ĠSwansea": 36019, + "dm": 36020, + "Ġsalty": 36021, + "yellow": 36022, + "Ġcape": 36023, + "ĠDrain": 36024, + "ĠPaula": 36025, + "ĠToledo": 36026, + "lesi": 36027, + "Magazine": 36028, + "ĠWick": 36029, + "ĠMn": 36030, + "ĠAck": 36031, + "ĠRiding": 36032, + "ASON": 36033, + "Ġhomophobic": 36034, + "ARP": 36035, + "Ġwandered": 36036, + "CPU": 36037, + "oodoo": 36038, + "ĠPipe": 36039, + "Ġtightening": 36040, + "ĠButt": 36041, + "318": 36042, + "Ġdeserted": 36043, + "Session": 36044, + "Ġfacilitating": 36045, + "Jump": 36046, + "Ġemergencies": 36047, + "OWER": 36048, + "Ġexhaustive": 36049, + "ĠAFTER": 36050, + "Ġheartbeat": 36051, + "ĠLabel": 36052, + "acky": 36053, + "ĠCertified": 36054, + "iltration": 36055, + "Ze": 36056, + "ĠUtt": 36057, + "Ġ1300": 36058, + "Ġpresume": 36059, + "ĠDisp": 36060, + "Ġsurged": 36061, + "Ġdolls": 36062, + "Columb": 
36063, + "Ġchimpan": 36064, + "ĠRazor": 36065, + "Ġticks": 36066, + "Ġcouncillor": 36067, + "Ġpilgrimage": 36068, + "ĠRebels": 36069, + "ĠQC": 36070, + "ĠAuction": 36071, + "xia": 36072, + "ikk": 36073, + "bred": 36074, + "Ġinsertion": 36075, + "Ġcoarse": 36076, + "dB": 36077, + "SEE": 36078, + "ĠZap": 36079, + "ĠFoo": 36080, + "Ġcontempor": 36081, + "ĠQuarterly": 36082, + "otions": 36083, + "ĠAlchemist": 36084, + "ĠTrey": 36085, + "ĠDuo": 36086, + "Sweet": 36087, + "804": 36088, + "ĠGiov": 36089, + "Ġfunn": 36090, + "Nin": 36091, + "hoff": 36092, + "Ġramifications": 36093, + "Ġ1922": 36094, + "ĠExperts": 36095, + "azes": 36096, + "Ġgarments": 36097, + "arial": 36098, + "ĠNab": 36099, + "Ġ257": 36100, + "ĠVed": 36101, + "Ġhumorous": 36102, + "ĠPompe": 36103, + "Ġnylon": 36104, + "Ġlurking": 36105, + "ĠSergey": 36106, + "ĠMattis": 36107, + "Ġmisogyny": 36108, + "ĠComponents": 36109, + "ĠWatching": 36110, + "ĠFolk": 36111, + "ractical": 36112, + "Bush": 36113, + "Ġtaped": 36114, + "Ġgrouping": 36115, + "Ġbeads": 36116, + "Ġ2048": 36117, + "Ġcondu": 36118, + "querque": 36119, + "Reading": 36120, + "Ġgrievances": 36121, + "Ultra": 36122, + "Ġendpoint": 36123, + "Hig": 36124, + "ĠStatic": 36125, + "ĠScarborough": 36126, + "Lua": 36127, + "ĠMessi": 36128, + "aqu": 36129, + "ĠPsyNet": 36130, + "ĠRudd": 36131, + "Ġavenue": 36132, + "vp": 36133, + "Jer": 36134, + "Ġshady": 36135, + "ĠResist": 36136, + "ĠArtemis": 36137, + "Ġcareless": 36138, + "Ġbrokers": 36139, + "Ġtemperament": 36140, + "Ġ520": 36141, + "Tags": 36142, + "ĠTurning": 36143, + "Ġuttered": 36144, + "Ġpedd": 36145, + "Ġimprovised": 36146, + "Ġ:(": 36147, + "Ġtabl": 36148, + "Ġplains": 36149, + "1600": 36150, + "pressure": 36151, + "ĠEssence": 36152, + "margin": 36153, + "friends": 36154, + "ĠRestoration": 36155, + "Ġpollut": 36156, + "ĠPoker": 36157, + "ĠAugustine": 36158, + "ĠCIS": 36159, + "ĠSEAL": 36160, + "orama": 36161, + "Ġthwart": 36162, + "seek": 36163, + "Ġpagan": 36164, + "º": 36165, + "cpu": 36166, + "Ġgarn": 36167, + "Ġassortment": 36168, + "ĠILCS": 36169, + "tower": 36170, + "Recommended": 36171, + "Ġunborn": 36172, + "ĠRandomRedditor": 36173, + "ĠRandomRedditorWithNo": 36174, + "Ġparalyzed": 36175, + "Ġeruption": 36176, + "Ġintersect": 36177, + "ĠStoke": 36178, + "ĠSco": 36179, + "Bind": 36180, + "å¾": 36181, + "ĠPNG": 36182, + "ĠNegative": 36183, + "ĠNOAA": 36184, + "Leon": 36185, + "Ġalloy": 36186, + "ĠLama": 36187, + "ĠDiversity": 36188, + "575": 36189, + "Ġunderestimated": 36190, + "ĠScor": 36191, + "Ġmural": 36192, + "Ġbusted": 36193, + "soon": 36194, + "lif": 36195, + "Ġnonex": 36196, + "Ġallergy": 36197, + "ĠUnderworld": 36198, + "ĠRays": 36199, + "ĠBlasio": 36200, + "Ġhrs": 36201, + "ĠDir": 36202, + "Ġ327": 36203, + "byter": 36204, + "Ġreplacements": 36205, + "Ġactivates": 36206, + "rived": 36207, + "MH": 36208, + "Ġpans": 36209, + "ĠHI": 36210, + "Ġlongitudinal": 36211, + "Ġnuisance": 36212, + "aler": 36213, + "Ġswell": 36214, + "ĠSigned": 36215, + "sci": 36216, + "ĠIsles": 36217, + "ĠAGA": 36218, + "Ġdefiant": 36219, + "Ġsonic": 36220, + "ocon": 36221, + "KC": 36222, + "ĠAim": 36223, + "tie": 36224, + "ahah": 36225, + "ĠmL": 36226, + "DX": 36227, + "Ġbisc": 36228, + "ĠBillboard": 36229, + "ĠSYSTEM": 36230, + "NEY": 36231, + "gaard": 36232, + "Ġdistressed": 36233, + "formerly": 36234, + "Alan": 36235, + "Ġchefs": 36236, + "Ġoptics": 36237, + "ĠComet": 36238, + "ĠAMC": 36239, + "Ġredesigned": 36240, + "irmation": 36241, + "Ġsightings": 36242, + "382": 36243, + "311": 36244, + "ĠWB": 36245, + "Ġcontraction": 
36246, + "ĠTOTAL": 36247, + "Dual": 36248, + "Ġstartled": 36249, + "Ġunderstandably": 36250, + "Ġsunglasses": 36251, + "ETHOD": 36252, + "Ġdocker": 36253, + "Ġsurfing": 36254, + "ĠHEL": 36255, + "ĠSlack": 36256, + "tones": 36257, + "Ġshalt": 36258, + "Visual": 36259, + "498": 36260, + "Department": 36261, + "cussion": 36262, + "Ġunrestricted": 36263, + "Ġtad": 36264, + "Ġrename": 36265, + "employed": 36266, + "Ġeducating": 36267, + "Ġgrinned": 36268, + "bedroom": 36269, + "ĠActivities": 36270, + "ĠVelvet": 36271, + "ĠSWAT": 36272, + "Ġshuffle": 36273, + "igor": 36274, + "Ġsaturation": 36275, + "Finding": 36276, + "cream": 36277, + "icter": 36278, + "Ġvodka": 36279, + "tracking": 36280, + "tec": 36281, + "Ġforeground": 36282, + "iesta": 36283, + "Ġvehement": 36284, + "ĠECB": 36285, + "ĠTie": 36286, + "Ey": 36287, + "Ġturtles": 36288, + "ĠRailroad": 36289, + "ĠKatz": 36290, + "ĠFrames": 36291, + "Ġmenace": 36292, + "ĠFellowship": 36293, + "ĠEssential": 36294, + "uggish": 36295, + "Ġdrip": 36296, + "chwitz": 36297, + "ĠKyoto": 36298, + "sb": 36299, + "ĠNina": 36300, + "Parameter": 36301, + "Ġalarms": 36302, + "ĠClaud": 36303, + "Ġpioneering": 36304, + "Ġchiefly": 36305, + "ĠScream": 36306, + "Collection": 36307, + "Ġthankfully": 36308, + "ĠRonaldo": 36309, + "åŃIJ": 36310, + "strip": 36311, + "ĠDisneyland": 36312, + "commercial": 36313, + "Seeing": 36314, + "Soul": 36315, + "Ġevacuate": 36316, + "Ġciv": 36317, + "ĠAshe": 36318, + "Ġdivides": 36319, + "ĠDagger": 36320, + "rehensive": 36321, + "Ġberries": 36322, + "ĠDF": 36323, + "Ġsushi": 36324, + "Ġplurality": 36325, + "WI": 36326, + "Ġdisadvantaged": 36327, + "Ġbattalion": 36328, + "obiles": 36329, + "451": 36330, + "Ġcling": 36331, + "Ġundeniable": 36332, + "ĠLounge": 36333, + "Ġhaunt": 36334, + "phe": 36335, + "Ġquantify": 36336, + "Ġdiffered": 36337, + "Ġ[*]": 36338, + "ĠViz": 36339, + "cum": 36340, + "slave": 36341, + "Ġvideog": 36342, + "Ġquar": 36343, + "Ġbundles": 36344, + "ĠAlonso": 36345, + "tackle": 36346, + "Ġneuronal": 36347, + "Ġlandslide": 36348, + "confirmed": 36349, + "ĠDepth": 36350, + "Ġrenewables": 36351, + "Bear": 36352, + "ĠMacedonia": 36353, + "Ġjerseys": 36354, + "Ġbunk": 36355, + "ĠSpawn": 36356, + "ĠControls": 36357, + "ĠBuchanan": 36358, + "Ġrobotics": 36359, + "Ġemphasizing": 36360, + "ĠTutorial": 36361, + "hyp": 36362, + "iston": 36363, + "Ġmonumental": 36364, + "æ°": 36365, + "ĠCarry": 36366, + "Ġtbsp": 36367, + "enance": 36368, + "Hill": 36369, + "arthed": 36370, + "Ġrotten": 36371, + "Dean": 36372, + "Ġtwisting": 36373, + "Ġgoodwill": 36374, + "Ġimmersion": 36375, + "Living": 36376, + "Ġbrushes": 36377, + "ĠCGI": 36378, + "ĠAtk": 36379, + "traditional": 36380, + "Ġphantom": 36381, + "ĠStamina": 36382, + "Ġexpansions": 36383, + "ĠMarin": 36384, + "Ġembarked": 36385, + "ĠEg": 36386, + "intestinal": 36387, + "ĠPEOPLE": 36388, + "ĠBooth": 36389, + "ĠAppalach": 36390, + "Ġrelegated": 36391, + "VT": 36392, + "MIT": 36393, + "Ġmuster": 36394, + "Ġwithdrawing": 36395, + "Ġmicroscope": 36396, + "ĠGathering": 36397, + "ĠCrescent": 36398, + "ĠArgentine": 36399, + "ĠDecre": 36400, + "ĠDominic": 36401, + "Ġbuds": 36402, + "antage": 36403, + "ĠIon": 36404, + "Ġwidened": 36405, + "ONSORED": 36406, + "ĠGloves": 36407, + "iannopoulos": 36408, + "razen": 36409, + "feel": 36410, + "Ġrepayment": 36411, + "Ġhindsight": 36412, + "ĠREALLY": 36413, + "ĠPistol": 36414, + "ĠBrah": 36415, + "Ġwatts": 36416, + "Ġsurvives": 36417, + "Ġflurry": 36418, + "issy": 36419, + "Alert": 36420, + "ĠUruguay": 36421, + "Phoenix": 36422, + "Slow": 
36423, + "ĠGrave": 36424, + "ĠFir": 36425, + "Ġmanageable": 36426, + "Ġtariff": 36427, + "ĠUDP": 36428, + "ĠPistons": 36429, + "ĠNigerian": 36430, + "Ġstrikeouts": 36431, + "Ġcosmetics": 36432, + "whelming": 36433, + "fab": 36434, + "cape": 36435, + "proxy": 36436, + "Ġrethink": 36437, + "Ġovercoming": 36438, + "simple": 36439, + "Ġwoo": 36440, + "Ġdistracting": 36441, + "ĠStanton": 36442, + "ĠTulsa": 36443, + "ĠDock": 36444, + "659": 36445, + "Ġdiscord": 36446, + "ĠEmacs": 36447, + "ĠVes": 36448, + "ĠROB": 36449, + "Ġreassuring": 36450, + "Ġconsortium": 36451, + "Muslims": 36452, + "321": 36453, + "Ġprompts": 36454, + "sei": 36455, + "ĠHitch": 36456, + "imposed": 36457, + "ĠFool": 36458, + "Ġindiscrim": 36459, + "wrong": 36460, + "buquerque": 36461, + "Davis": 36462, + "!]": 36463, + "Ġtimeless": 36464, + "ĠNEED": 36465, + "Ġpesticide": 36466, + "Ġrallying": 36467, + "ĠCalder": 36468, + "Ġå¤": 36469, + "Ġxp": 36470, + "ĠUnle": 36471, + "ĠExport": 36472, + "luaj": 36473, + "Buff": 36474, + ")[": 36937, + "Ġsqor": 36938, + "Saudi": 36939, + "Ġistg": 36940, + "Ġindulge": 36941, + "proc": 36942, + "Ġdisgusted": 36943, + "Ġcompounded": 36944, + "Ġnem": 36945, + "Ġschooling": 36946, + "ĠCure": 36947, + "processing": 36948, + "Sol": 36949, + "Ġproverb": 36950, + "itized": 36951, + "ĠAlvarez": 36952, + "Ġscarf": 36953, + "Ġrectangular": 36954, + "reve": 36955, + "Ġhormonal": 36956, + "ĠStress": 36957, + "itizen": 36958, + "Ġ425": 36959, + "girls": 36960, + "ĠNoir": 36961, + "ĠRapp": 36962, + "Ġmarches": 36963, + "church": 36964, + "ĠUses": 36965, + "Ġ405": 36966, + "ĠBerm": 36967, + "Ġordinances": 36968, + "ĠJudgment": 36969, + "Charges": 36970, + "ĠZin": 36971, + "Ġdusty": 36972, + "Ġstrawberries": 36973, + "Ġperce": 36974, + "ĠThur": 36975, + "ĠDeborah": 36976, + "netflix": 36977, + "ĠLambert": 36978, + "Ġamused": 36979, + "ĠGuang": 36980, + "YOU": 36981, + "RGB": 36982, + "ĠCCTV": 36983, + "Ġfiat": 36984, + "rang": 36985, + "Ġfederation": 36986, + "ĠMant": 36987, + "ĠBust": 36988, + "ĠMare": 36989, + "respective": 36990, + "ĠMigration": 36991, + "ĠBIT": 36992, + "590": 36993, + "Ġpatriotism": 36994, + "Ġoutlining": 36995, + "region": 36996, + "ĠJosé": 36997, + "Ġblasting": 36998, + "ĠEzra": 36999, + "Bs": 37000, + "Ġundermines": 37001, + "ĠSmooth": 37002, + "Ġclashed": 37003, + "radio": 37004, + "Ġtransitioning": 37005, + "ĠBuccaneers": 37006, + "ĠOwl": 37007, + "Ġplugs": 37008, + "Ġhiatus": 37009, + "ĠPinball": 37010, + "Ġmig": 37011, + "ĠNutr": 37012, + "ĠWolfe": 37013, + "Ġintegers": 37014, + "Ġorbits": 37015, + "ĠEdwin": 37016, + "ĠDirectX": 37017, + "bite": 37018, + "Ġblazing": 37019, + "vr": 37020, + "Edge": 37021, + "ĠPID": 37022, + "exit": 37023, + "ĠComed": 37024, + "ĠPathfinder": 37025, + "ĠGuid": 37026, + "ĠSigns": 37027, + "ĠZer": 37028, + "ĠAgenda": 37029, + "Ġreimbursement": 37030, + "Mesh": 37031, + "iPhone": 37032, + "ĠMarcos": 37033, + "ĠSites": 37034, + "hate": 37035, + "enburg": 37036, + "Ġsockets": 37037, + "pend": 37038, + "Batman": 37039, + "vir": 37040, + "ĠSHOW": 37041, + "Ġprovisional": 37042, + "conn": 37043, + "ĠDeaths": 37044, + "ATIVE": 37045, + "Profile": 37046, + "sym": 37047, + "JA": 37048, + "Ġninja": 37049, + "installed": 37050, + "idates": 37051, + "ebra": 37052, + "ĠOmaha": 37053, + "Ġseizing": 37054, + "ĠBeasts": 37055, + "Ġsalts": 37056, + "Mission": 37057, + "Generally": 37058, + "ĠTrilogy": 37059, + "heon": 37060, + "legates": 37061, + "Ġdime": 37062, + "Ġfaire": 37063, + "parable": 37064, + "Graph": 37065, + "Ġtotaling": 37066, + "Ġdiagrams": 37067, + 
"ĠYanuk": 37068, + "plet": 37069, + "ĠMeh": 37070, + "Ġmythical": 37071, + "ĠStephens": 37072, + "autical": 37073, + "ochemistry": 37074, + "Ġkilograms": 37075, + "Ġelbows": 37076, + "ancock": 37077, + "ĠBCE": 37078, + "ĠPrague": 37079, + "Ġimprov": 37080, + "ĠDevin": 37081, + "Ġ\"\\": 37082, + "paralle": 37083, + "Ġsupremacists": 37084, + "ĠBillion": 37085, + "Ġregimen": 37086, + "innacle": 37087, + "Ġrequisite": 37088, + "angan": 37089, + "ĠBurlington": 37090, + "ainment": 37091, + "ĠObjective": 37092, + "omsky": 37093, + "GV": 37094, + "Ġunilateral": 37095, + "Ġtc": 37096, + "Ġhires": 37097, + "mental": 37098, + "Ġinvoluntary": 37099, + "Ġtranspl": 37100, + "ĠASCII": 37101, + "¨": 37102, + "Events": 37103, + "Ġdoubted": 37104, + "ĠKaplan": 37105, + "ĠCourage": 37106, + "igon": 37107, + "ĠManaging": 37108, + "ĠTart": 37109, + "Ġfalsehood": 37110, + "ĠViolet": 37111, + "Ġairs": 37112, + "Ġfertilizer": 37113, + "Britain": 37114, + "Ġaquatic": 37115, + "ouf": 37116, + "Words": 37117, + "ĠHartford": 37118, + "Ġevenings": 37119, + "ĠVengeance": 37120, + "quite": 37121, + "Gall": 37122, + "ĠPret": 37123, + "Ġpdf": 37124, + "ĠLM": 37125, + "ĠSochi": 37126, + "ĠIntercept": 37127, + "920": 37128, + "Ġprofitability": 37129, + "ĠIdle": 37130, + "ĠMacDonald": 37131, + "ĠEstablishment": 37132, + "umsy": 37133, + "Ġgatherings": 37134, + "ĠNaj": 37135, + "Charlie": 37136, + "Ġascent": 37137, + "ĠProtector": 37138, + "Ġalgebra": 37139, + "Ġbios": 37140, + "forums": 37141, + "ELS": 37142, + "Introduced": 37143, + "Ġ335": 37144, + "Ġastronomy": 37145, + "Contribut": 37146, + "ĠPolic": 37147, + "Platform": 37148, + "Ġcontainment": 37149, + "wrap": 37150, + "Ġcoronary": 37151, + "ĠJelly": 37152, + "manager": 37153, + "Ġheartbreaking": 37154, + "cair": 37155, + "ĠChero": 37156, + "cgi": 37157, + "Medical": 37158, + "ĠAccountability": 37159, + "!!\"": 37160, + "ophile": 37161, + "Ġpsychotic": 37162, + "ĠRestrict": 37163, + "Ġequitable": 37164, + "issues": 37165, + "Ġ1905": 37166, + "ĠNek": 37167, + "cised": 37168, + "ĠTracking": 37169, + "Ġozone": 37170, + "Ġcooker": 37171, + "rosis": 37172, + "Ġreopen": 37173, + "Ġinfinity": 37174, + "ĠPharmaceutical": 37175, + "ensional": 37176, + "Attempt": 37177, + "ĠRory": 37178, + "Marco": 37179, + "Ġawaits": 37180, + "HOW": 37181, + "treated": 37182, + "Ġbolst": 37183, + "Ġrevered": 37184, + "Ġpods": 37185, + "oppers": 37186, + "0010": 37187, + "Ġamplitude": 37188, + "rican": 37189, + "SPONSORED": 37190, + "Ġtrousers": 37191, + "Ġhalves": 37192, + "ĠKaine": 37193, + "ĠCutler": 37194, + "ĠAUTH": 37195, + "Ġsplendid": 37196, + "Ġpreventive": 37197, + "ĠDudley": 37198, + "ifacts": 37199, + "uminati": 37200, + "ĠYin": 37201, + "Ġadmon": 37202, + "ĠVag": 37203, + "Ġinverted": 37204, + "Ġhastily": 37205, + "ĠHague": 37206, + "Lyn": 37207, + "Ġledger": 37208, + "Ġastronomical": 37209, + "getting": 37210, + "Ġcirca": 37211, + "ĠCic": 37212, + "ĠTennis": 37213, + "Limited": 37214, + "Ġdru": 37215, + "ĠBYU": 37216, + "Ġtravellers": 37217, + "Ġpane": 37218, + "ĠIntro": 37219, + "Ġpatiently": 37220, + "Ġaiding": 37221, + "Ġloos": 37222, + "ĠTough": 37223, + "Ġ293": 37224, + "Ġconsumes": 37225, + "SourceFile": 37226, + "Ġ\"\"\"": 37227, + "Ġbonding": 37228, + "Ġtilted": 37229, + "Ġmenstrual": 37230, + "ĠCelestial": 37231, + "ULAR": 37232, + "Plugin": 37233, + "Ġrisking": 37234, + "Naz": 37235, + "ĠRiyadh": 37236, + "Ġaccredited": 37237, + "Ġskirm": 37238, + "éĽ": 37239, + "Ġexaminer": 37240, + "Ġmessing": 37241, + "Ġnearing": 37242, + "ĠChern": 37243, + "ĠBeckham": 37244, + 
"Ġswapped": 37245, + "Ġgoose": 37246, + "Kay": 37247, + "Ġlofty": 37248, + "ĠWallet": 37249, + "Ġ['": 37250, + "Ġapocalypse": 37251, + "Ġbamboo": 37252, + "ĠSPACE": 37253, + "ĠElena": 37254, + "Ġ306": 37255, + "acons": 37256, + "Ġtightened": 37257, + "Ġadolescence": 37258, + "Ġrainy": 37259, + "Ġvandalism": 37260, + "ĠNewtown": 37261, + "Ġconject": 37262, + "cakes": 37263, + "Ġcheated": 37264, + "Ġmoderators": 37265, + "params": 37266, + "EFF": 37267, + "Ġdeceit": 37268, + "ĠSTL": 37269, + "ĠTanzania": 37270, + "ĠRI": 37271, + "Ġ1923": 37272, + "ĠExile": 37273, + "thel": 37274, + "Ġtheolog": 37275, + "Ġquirky": 37276, + "ĠIrvine": 37277, + "Ġneedy": 37278, + "oris": 37279, + "Um": 37280, + "Ka": 37281, + "Ġmailbox": 37282, + "322": 37283, + "Ġbos": 37284, + "ĠPetra": 37285, + "KING": 37286, + "Ġenlarged": 37287, + "Often": 37288, + "Ġbadass": 37289, + "Ġ343": 37290, + "ĠPlaces": 37291, + "ĠCAD": 37292, + "Ġpristine": 37293, + "Ġintervening": 37294, + "direction": 37295, + "Ġlaz": 37296, + "ĠDSM": 37297, + "Ġprojecting": 37298, + "ĠFunk": 37299, + "agog": 37300, + "payment": 37301, + "nov": 37302, + "Ġchatter": 37303, + "ARB": 37304, + "Ġexaminations": 37305, + "ĠHousehold": 37306, + "ĠGus": 37307, + "Ford": 37308, + "414": 37309, + "Boss": 37310, + "Ġmystic": 37311, + "Ġleaps": 37312, + "ĠBav": 37313, + "ulz": 37314, + "budget": 37315, + "Football": 37316, + "Ġsubsidized": 37317, + "Ġfirsthand": 37318, + "Ġcoincide": 37319, + "ocular": 37320, + "Conn": 37321, + "ĠCollabor": 37322, + "Ġfools": 37323, + "amura": 37324, + "ahar": 37325, + "rists": 37326, + "Ġswollen": 37327, + "Ġexpended": 37328, + "ĠPau": 37329, + "sup": 37330, + "Ġspar": 37331, + "Ġkeynote": 37332, + "suff": 37333, + "Ġunequal": 37334, + "Ġprogressing": 37335, + "strings": 37336, + "ĠGamergate": 37337, + "Disney": 37338, + "ĠEleven": 37339, + "omnia": 37340, + "Ġscripted": 37341, + "Ġearners": 37342, + "brother": 37343, + "ĠEnabled": 37344, + "æ³": 37345, + "Ġlarvae": 37346, + "ĠLOC": 37347, + "mess": 37348, + "Wilson": 37349, + "ĠTemplate": 37350, + "successfully": 37351, + "Ġparamount": 37352, + "Ġcamouflage": 37353, + "Ġbinds": 37354, + "ĠQuiet": 37355, + "ĠShutterstock": 37356, + "rush": 37357, + "Ġmascot": 37358, + "fortune": 37359, + "ĠColt": 37360, + "ĠBeyon": 37361, + "habi": 37362, + "Ġhairc": 37363, + "Ġ267": 37364, + "ĠDeus": 37365, + "Ġtwitch": 37366, + "Ġconcentrating": 37367, + "Ġnipples": 37368, + "cible": 37369, + "Ġgir": 37370, + "NZ": 37371, + "Math": 37372, + "nih": 37373, + "Required": 37374, + "Ġponder": 37375, + "ĠSAN": 37376, + "Ġweddings": 37377, + "Ġloneliness": 37378, + "NES": 37379, + "ĠMahjong": 37380, + "695": 37381, + "addle": 37382, + "ĠGarner": 37383, + "ĠCOUR": 37384, + "Bridge": 37385, + "Ġspree": 37386, + "ĠCaldwell": 37387, + "Ġbribery": 37388, + "Ġ��������": 37389, + "plugins": 37390, + "Ġracket": 37391, + "Ġchampagne": 37392, + "versible": 37393, + "Vote": 37394, + "Ġmodifiers": 37395, + "Mayor": 37396, + "680": 37397, + "Ġassemblies": 37398, + "ĠSultan": 37399, + "ĠNing": 37400, + "ĠLadies": 37401, + "Ġsulfur": 37402, + "Ġorbs": 37403, + "Ġ-----": 37404, + "_______": 37405, + "ĠJournalism": 37406, + "Ġesports": 37407, + "Ġlush": 37408, + "Ġhue": 37409, + "Ġspectral": 37410, + "Honest": 37411, + "ãĥı": 37412, + "Ġbushes": 37413, + "Ġreinforcement": 37414, + "Ġreopened": 37415, + "ĠWheels": 37416, + "ĠMorg": 37417, + "rieving": 37418, + "Ġauxiliary": 37419, + "ĠjQuery": 37420, + "ĠBAT": 37421, + "tesque": 37422, + "Ġvertex": 37423, + "pure": 37424, + "frey": 37425, + "ãĤº": 37426, + 
"dos": 37427, + "Ġtyph": 37428, + "Ġcull": 37429, + "Ġeq": 37430, + "Ġdecon": 37431, + "Ġtossing": 37432, + "Ġdisparate": 37433, + "ĠBrigham": 37434, + "printf": 37435, + "ledged": 37436, + "Ġsund": 37437, + "Ġcozy": 37438, + "Ġhepatitis": 37439, + "performing": 37440, + "Ġaval": 37441, + "ĠGG": 37442, + "future": 37443, + "Ġpetertodd": 37444, + "ĠKosovo": 37445, + "Ġmagnets": 37446, + "Already": 37447, + "ĠEdison": 37448, + "ĠCeres": 37449, + "ĠRAID": 37450, + "Ġbrilliance": 37451, + "576": 37452, + "Ġderives": 37453, + "Ġhypertension": 37454, + "ĠÎĶ": 37455, + "Ġlambda": 37456, + "Ġflair": 37457, + "Ġmissionaries": 37458, + "Ġrapes": 37459, + "ĠStarter": 37460, + "ĠMonths": 37461, + "Ġdefy": 37462, + "Ġseismic": 37463, + "ĠRaphael": 37464, + "Ġeurozone": 37465, + "656": 37466, + "zsche": 37467, + "Ġscratched": 37468, + "Ġbows": 37469, + "ĠLennon": 37470, + "ĠGaia": 37471, + "Ġdripping": 37472, + "facts": 37473, + "Ale": 37474, + "Ġfrogs": 37475, + "ĠBreast": 37476, + "ogeneity": 37477, + "ĠProsecutor": 37478, + "Ġamplified": 37479, + "ĠHodg": 37480, + "ĠFn": 37481, + "Thousands": 37482, + "ĠNIH": 37483, + "ĠMonitoring": 37484, + "FTWARE": 37485, + "ĠPriebus": 37486, + "ĠGrowing": 37487, + "hunter": 37488, + "Ġdiagnose": 37489, + "ĠMald": 37490, + "ĠLR": 37491, + "Ġcrowned": 37492, + "Ġbursting": 37493, + "Ġdissolution": 37494, + "javascript": 37495, + "Ġusefulness": 37496, + "ĠExecution": 37497, + ":(": 37498, + "ĠIvory": 37499, + "aah": 37500, + "Ġpersecuted": 37501, + "violence": 37502, + "istas": 37503, + "ĠCrate": 37504, + "Ġimpulses": 37505, + "ĠSpani": 37506, + "edes": 37507, + "Handle": 37508, + "ĠZerg": 37509, + "thinkable": 37510, + "Lastly": 37511, + "Ġspontaneously": 37512, + "Ġinconvenient": 37513, + "Ġdismissing": 37514, + "Ġplotted": 37515, + "Ġeighty": 37516, + "Ġ737": 37517, + "rish": 37518, + "ĠThornton": 37519, + "atham": 37520, + "Ġsitcom": 37521, + "Ven": 37522, + "Recipe": 37523, + "tel": 37524, + "lund": 37525, + "Ġclears": 37526, + "ĠSasuke": 37527, + "Ġ258": 37528, + "Ġopting": 37529, + "Ġenraged": 37530, + "esthetic": 37531, + "ĠAe": 37532, + "uchs": 37533, + "Prep": 37534, + "Flow": 37535, + "Ġrunoff": 37536, + "ĠEating": 37537, + "ĠGiles": 37538, + "ĠActing": 37539, + "resources": 37540, + "ibaba": 37541, + "Ġrpm": 37542, + "Ġskewed": 37543, + "ĠBlanc": 37544, + "ĠSakuya": 37545, + "Ġhotter": 37546, + "Ġ1924": 37547, + "opian": 37548, + "cko": 37549, + "Ġcrumbling": 37550, + "Ġcaptains": 37551, + "ĠAppropriations": 37552, + "leaders": 37553, + "dropping": 37554, + "anuts": 37555, + "Ġreversing": 37556, + "ĠPose": 37557, + "ĠSek": 37558, + "Scot": 37559, + "ĠIdea": 37560, + "cise": 37561, + "ĠSlovenia": 37562, + "Ġ317": 37563, + "Doctor": 37564, + "Ġcrocod": 37565, + "aldi": 37566, + "Sea": 37567, + "ĠFarrell": 37568, + "Ġmercenaries": 37569, + "ĠRNC": 37570, + "ĠGuess": 37571, + "Ġpacing": 37572, + "Machine": 37573, + "StreamerBot": 37574, + "ĠCharity": 37575, + "Ġ298": 37576, + "Ġcannons": 37577, + "ĠToby": 37578, + "TPPStreamerBot": 37579, + "ĠPassion": 37580, + "cfg": 37581, + "Thom": 37582, + "Ġbadges": 37583, + "ĠBernstein": 37584, + ".âĢĵ": 37585, + "ĠPOP": 37586, + "ĠConj": 37587, + "Ġinitialization": 37588, + "Ġbiodiversity": 37589, + "Dub": 37590, + "Ġfeudal": 37591, + "Ġdisclaimer": 37592, + "Ġcrow": 37593, + "Ġignition": 37594, + "arf": 37595, + "SHA": 37596, + "ĠkHz": 37597, + "hazard": 37598, + "ĠArtists": 37599, + "oeuv": 37600, + "679": 37601, + "ĠRudy": 37602, + "Nine": 37603, + "ĠRamadan": 37604, + "å½": 37605, + "itto": 37606, + "Ġadrenaline": 
37607, + "Cert": 37608, + "Ġsmelled": 37609, + "Ġimpunity": 37610, + "Ġagendas": 37611, + "ĠReborn": 37612, + "ĠConcent": 37613, + "ĠSeems": 37614, + "Ġomega": 37615, + "ĠDustin": 37616, + "Ġbacker": 37617, + "ĠSauce": 37618, + "ĠBoyle": 37619, + "WIN": 37620, + "Ġspins": 37621, + "Ġpauses": 37622, + "upt": 37623, + "Ġshredded": 37624, + "Ġstrapped": 37625, + "ĠCorruption": 37626, + "Ġscratches": 37627, + "Ġni": 37628, + "Ġattire": 37629, + "ĠSAF": 37630, + "FactoryReloaded": 37631, + "ĠIPS": 37632, + "Ġ(%": 37633, + "Ġseminar": 37634, + "focus": 37635, + "civil": 37636, + "Ġ1860": 37637, + "intosh": 37638, + "Ġcontinual": 37639, + "Ġabbrevi": 37640, + "ĠSok": 37641, + "ocobo": 37642, + "XM": 37643, + "Ġfrantic": 37644, + "Ġunavoidable": 37645, + "Ġartery": 37646, + "Ġannotations": 37647, + "bath": 37648, + "Climate": 37649, + "Ġdors": 37650, + "ĠSlide": 37651, + "coord": 37652, + "ĠReload": 37653, + "ĠLDL": 37654, + "ĠLovecraft": 37655, + "Ġunimagin": 37656, + "Ġresembled": 37657, + "Ġbarracks": 37658, + "np": 37659, + "Ġsurrogate": 37660, + "Ġcategorized": 37661, + "ãĤ©": 37662, + "Ġvaccinated": 37663, + "Ġdrainage": 37664, + "Ġindist": 37665, + "ĠWhatsApp": 37666, + "Ġ1870": 37667, + "olerance": 37668, + "invoke": 37669, + "amorph": 37670, + "Ġreconnect": 37671, + "Ġemanc": 37672, + "Ġblindness": 37673, + "Ġ1280": 37674, + "internet": 37675, + "collar": 37676, + "Ġaltru": 37677, + "Ġabyss": 37678, + "ĠTRI": 37679, + "657": 37680, + "Ġinfused": 37681, + "HEAD": 37682, + "Ġforestry": 37683, + "ĠWoody": 37684, + "ĠCi": 37685, + "wi": 37686, + "sam": 37687, + "784": 37688, + "holiday": 37689, + "Ġmogul": 37690, + "ĠFees": 37691, + "ĠDEN": 37692, + "Internal": 37693, + "urbed": 37694, + "fusc": 37695, + "atom": 37696, + "ĠIllusion": 37697, + "Ġpolled": 37698, + "Ġflap": 37699, + "Ġcoax": 37700, + "LGBT": 37701, + "Analy": 37702, + "ĠSections": 37703, + "ĠCaliforn": 37704, + "emn": 37705, + "Ġhither": 37706, + "ĠNIGHT": 37707, + "Ġnailed": 37708, + "ĠPipeline": 37709, + "391": 37710, + "oof": 37711, + "ĠPrimal": 37712, + "verend": 37713, + "Ġslashing": 37714, + "Ġretri": 37715, + "aviour": 37716, + "Ġdeparting": 37717, + "gil": 37718, + "ISC": 37719, + "Ġmidway": 37720, + "Ġultrasound": 37721, + "Ġbehaving": 37722, + "ĠTara": 37723, + "classes": 37724, + "Virtual": 37725, + "ĠColonial": 37726, + "Ġstripping": 37727, + "Ġorchestrated": 37728, + "ĠGraves": 37729, + "452": 37730, + "ĠIronically": 37731, + "ĠWriters": 37732, + "Ġlends": 37733, + "ĠManz": 37734, + "Ġraven": 37735, + "Ġoxidative": 37736, + "Ġ266": 37737, + "ELF": 37738, + "actually": 37739, + "ascar": 37740, + "Draft": 37741, + "Ġfavourable": 37742, + "Ġhumiliating": 37743, + "Ġfidelity": 37744, + "ĠHof": 37745, + "ĠXuan": 37746, + "496": 37747, + "Ġlayered": 37748, + "atis": 37749, + "790": 37750, + "Ġpaycheck": 37751, + "iton": 37752, + "Kar": 37753, + "ĠVMware": 37754, + "ĠFarmer": 37755, + "Ġservic": 37756, + "glomer": 37757, + "Ġslump": 37758, + "ĠFabric": 37759, + "ĠDOC": 37760, + "esting": 37761, + "Ġreassure": 37762, + "Ġphyl": 37763, + "volt": 37764, + "itory": 37765, + "Rules": 37766, + "Ġoxidation": 37767, + "Ġprized": 37768, + "Ġmistress": 37769, + "ĠDjango": 37770, + "WARN": 37771, + "åij": 37772, + "Ġencode": 37773, + "ĠFeedback": 37774, + "Ġstupidity": 37775, + "Ian": 37776, + "ĠYugoslavia": 37777, + "ר": 37778, + "acl": 37779, + "UTE": 37780, + "1977": 37781, + "Ġqualifies": 37782, + "Ġpulses": 37783, + "pretty": 37784, + "Ġfroze": 37785, + "Ġss": 37786, + "Iterator": 37787, + "Ġurgently": 37788, + "Ġmailed": 37789, 
+ "ĠCham": 37790, + "Ġsustaining": 37791, + "Ġbasil": 37792, + "Ġpuppies": 37793, + "ilant": 37794, + "ĠPLEASE": 37795, + "lap": 37796, + "aceous": 37797, + "Fear": 37798, + "ĠMastery": 37799, + "automatic": 37800, + "ĠTAG": 37801, + "Ġantim": 37802, + "agles": 37803, + "473": 37804, + "frames": 37805, + "Ġwhispers": 37806, + "ĠWhoever": 37807, + "Ġbravery": 37808, + "ĠUKIP": 37809, + "ractions": 37810, + "\"\"\"": 37811, + "Ġtame": 37812, + "Ġparted": 37813, + "everything": 37814, + "CONT": 37815, + "Ġindebted": 37816, + "Ġaddr": 37817, + "rek": 37818, + "IRED": 37819, + "Ġeminent": 37820, + "clinton": 37821, + "Ġousted": 37822, + "Ġreviewer": 37823, + "Ġmeltdown": 37824, + "Ġrearr": 37825, + "ĠYao": 37826, + "thereal": 37827, + "abyte": 37828, + "Ġstumbling": 37829, + "Ġbatches": 37830, + "Ġ259": 37831, + "Ġcontraceptive": 37832, + "Ġprostitute": 37833, + "ensis": 37834, + "Decl": 37835, + "ĠStrikes": 37836, + "Military": 37837, + "ĠOath": 37838, + "vacc": 37839, + "ppings": 37840, + "052": 37841, + "ĠpartName": 37842, + "amping": 37843, + "Reports": 37844, + "KI": 37845, + "CHR": 37846, + "Ġsubtly": 37847, + "swers": 37848, + "Blake": 37849, + "usual": 37850, + "Ġcontestants": 37851, + "Ġcartridges": 37852, + "ĠGREAT": 37853, + "Ġblush": 37854, + "ĠâĢº": 37855, + "472": 37856, + "Ġreasoned": 37857, + "ãĥ¤": 37858, + "paralleled": 37859, + "Ġdyn": 37860, + "agate": 37861, + "Ġnightly": 37862, + "åĨ": 37863, + "556": 37864, + "Ġsemantic": 37865, + "ĠAdvoc": 37866, + "Ġ!!": 37867, + "Ġdisagrees": 37868, + "ĠBW": 37869, + "Veh": 37870, + "Ġharming": 37871, + "Ġembraces": 37872, + "Ġstrives": 37873, + "Ġinland": 37874, + "ĠKard": 37875, + "Ġheats": 37876, + "ĠGinny": 37877, + "utan": 37878, + "ernaut": 37879, + "ylene": 37880, + "ĠElev": 37881, + "JD": 37882, + "Ġhars": 37883, + "ĠStarr": 37884, + "Ġskysc": 37885, + "Ġcollaborators": 37886, + "Usually": 37887, + "Ġrevolutions": 37888, + "ĠSTATS": 37889, + "Ġdismantle": 37890, + "Ġconfidently": 37891, + "Ġkinetic": 37892, + "Ali": 37893, + "Ġpercentile": 37894, + "Ġextracting": 37895, + "illian": 37896, + "estead": 37897, + "Ġphysicists": 37898, + "ĠMarshal": 37899, + "Ġfellowship": 37900, + "Ġdashed": 37901, + "ĠUR": 37902, + "ĠSioux": 37903, + "ĠCompact": 37904, + "amide": 37905, + "Python": 37906, + "ĠLeigh": 37907, + "ĠPharmac": 37908, + "istrates": 37909, + "herical": 37910, + "Ġfue": 37911, + "ĠEmin": 37912, + "Ġ({": 37913, + "ĠNeighborhood": 37914, + "Ġdisrupting": 37915, + "ĠDup": 37916, + "Ġgland": 37917, + "ĠSev": 37918, + "ĠMarian": 37919, + "argon": 37920, + "ĠDund": 37921, + "Ġ": 46904, + "ĠPhilips": 46905, + "ĠKafka": 46906, + "Ġupheaval": 46907, + "Ġsentimental": 46908, + "Ġsax": 46909, + "ĠAkira": 46910, + "serial": 46911, + "Matrix": 46912, + "Ġelecting": 46913, + "Ġcommenter": 46914, + "ĠNebula": 46915, + "plets": 46916, + "ĠNadu": 46917, + "ĠAdren": 46918, + "Ġenshr": 46919, + "ĠRAND": 46920, + "financial": 46921, + "ĠClyde": 46922, + "utherford": 46923, + "Ġsignage": 46924, + "Ġdeline": 46925, + "Ġphosphate": 46926, + "roversial": 46927, + "fascist": 46928, + "ĠVall": 46929, + "ĠBethlehem": 46930, + "Ġfors": 46931, + "Ġenglish": 46932, + "Solid": 46933, + "Nature": 46934, + "Ġva": 46935, + "ĠGuests": 46936, + "Ġtantal": 46937, + "Ġautoimmune": 46938, + ";;;;;;;;;;;;": 46939, + "ĠTotally": 46940, + "ĠOv": 46941, + "Ġdefences": 46942, + "ĠCoconut": 46943, + "Ġtranquil": 46944, + "Ġploy": 46945, + "Ġflavours": 46946, + "ĠFlask": 46947, + "ãĤ¨ãĥ«": 46948, + "ĠWeston": 46949, + "ĠVolvo": 46950, + "870": 46951, + "Ġmicrophones": 
46952, + "verbal": 46953, + "RPG": 46954, + "Ġiii": 46955, + ";}": 46956, + "028": 46957, + "Ġheadlined": 46958, + "Ġprimed": 46959, + "Ġhoard": 46960, + "ĠShad": 46961, + "ĠENTER": 46962, + "Ġtriangular": 46963, + "Ġcapit": 46964, + "lik": 46965, + "ĠAncients": 46966, + "Ġlash": 46967, + "Ġconvol": 46968, + "Ġcolonel": 46969, + "enemy": 46970, + "Gra": 46971, + "Ġpubs": 46972, + "utters": 46973, + "Ġassigns": 46974, + "ĠPenet": 46975, + "ĠMonstrous": 46976, + "ĠBowen": 46977, + "ilver": 46978, + "Haunted": 46979, + "ĠDing": 46980, + "started": 46981, + "plin": 46982, + "Ġcontaminants": 46983, + "ĠDOE": 46984, + "ffen": 46985, + "ĠTechnician": 46986, + "Ry": 46987, + "Ġrobbers": 46988, + "Ġhotline": 46989, + "ĠGuardiola": 46990, + "ĠKaufman": 46991, + "rower": 46992, + "ĠDresden": 46993, + "ĠAlpine": 46994, + "Elf": 46995, + "Ġfmt": 46996, + "ĠSard": 46997, + "urses": 46998, + "gpu": 46999, + "Unix": 47000, + "Ġunequivocally": 47001, + "ĠCitizenship": 47002, + "quad": 47003, + "mire": 47004, + "ĠSweeney": 47005, + "Battery": 47006, + "615": 47007, + "Ġpancakes": 47008, + "Ġoats": 47009, + "Maps": 47010, + "ĠContrast": 47011, + "mbudsman": 47012, + "ĠEPS": 47013, + "Ġsubcommittee": 47014, + "Ġsourcing": 47015, + "Ġsizing": 47016, + "ĠBuffer": 47017, + "ĠMandatory": 47018, + "Ġmoderates": 47019, + "ĠPatterns": 47020, + "ĠChocobo": 47021, + "ĠZan": 47022, + "ĠSTATES": 47023, + "ĠJudging": 47024, + "ĠInher": 47025, + "*:": 47026, + "Ġbil": 47027, + "ĠYen": 47028, + "Ġexhilar": 47029, + "ollower": 47030, + "zers": 47031, + "Ġsnug": 47032, + "maximum": 47033, + "Ġdespicable": 47034, + "ĠPACK": 47035, + "ĠAnnex": 47036, + "Ġsarcastic": 47037, + "Ġlatex": 47038, + "Ġtamp": 47039, + "ĠSao": 47040, + "bah": 47041, + "ĠReverend": 47042, + "ĠChinatown": 47043, + "ĠAUT": 47044, + "documented": 47045, + "ĠGABA": 47046, + "ĠCanaan": 47047, + "ĠÙħ": 47048, + "Ġgoverns": 47049, + "prev": 47050, + "Esc": 47051, + "ĠEstimates": 47052, + "OSP": 47053, + "Ġendeavour": 47054, + "ĠClosing": 47055, + "ometime": 47056, + "everyone": 47057, + "Ġworsen": 47058, + "Ġscanners": 47059, + "Ġdeviations": 47060, + "ĠRobotics": 47061, + "ĠCompton": 47062, + "Ġsorcerer": 47063, + "Ġendogenous": 47064, + "Ġemulation": 47065, + "ĠPiercing": 47066, + "ĠAph": 47067, + "ĠSocket": 47068, + "Ġbould": 47069, + "ĠOU": 47070, + "ĠBorderlands": 47071, + "Ġ1863": 47072, + "Gordon": 47073, + "ĠWTO": 47074, + "Ġrestricts": 47075, + "Ġmosaic": 47076, + "Ġmelodies": 47077, + "çĦ": 47078, + "Tar": 47079, + "Ġdisson": 47080, + "ĠProvides": 47081, + "Ġ......": 47082, + "bek": 47083, + "FIX": 47084, + "Ġbroom": 47085, + "anship": 47086, + "Doctors": 47087, + "Ġnerds": 47088, + "ĠRegions": 47089, + "naissance": 47090, + "Ġmete": 47091, + "Ġcrept": 47092, + "plings": 47093, + "Ġgirlfriends": 47094, + "knit": 47095, + "igent": 47096, + "owe": 47097, + "Ġushered": 47098, + "ĠBaz": 47099, + "Mobil": 47100, + "434": 47101, + "ĠPresents": 47102, + "origin": 47103, + "Ġinsomnia": 47104, + "ĠAux": 47105, + "439": 47106, + "ĠChili": 47107, + "irsch": 47108, + "GAME": 47109, + "Ġgestation": 47110, + "algia": 47111, + "romising": 47112, + "$,": 47113, + "crow": 47114, + "ĠInspection": 47115, + "atomic": 47116, + "Relations": 47117, + "JOHN": 47118, + "roman": 47119, + "ĠClockwork": 47120, + "ĠBakr": 47121, + "mone": 47122, + "MET": 47123, + "Ġthirsty": 47124, + "Ġbc": 47125, + "Ġfaculties": 47126, + "Rum": 47127, + "Ġnuance": 47128, + "ĠDarius": 47129, + "pleting": 47130, + "fters": 47131, + "etchup": 47132, + "Registration": 47133, + "ĠKE": 47134, + 
"Rah": 47135, + "Ġpreferential": 47136, + "ĠLash": 47137, + "ĠHH": 47138, + "Valid": 47139, + "ĠNAV": 47140, + "Ġstarve": 47141, + "ĠGong": 47142, + "zynski": 47143, + "ĠActress": 47144, + "Ġwik": 47145, + "Ġunaccompanied": 47146, + "lvl": 47147, + "Bride": 47148, + "ADS": 47149, + "ĠCommando": 47150, + "ĠVaughn": 47151, + "Wallet": 47152, + "Ġhopping": 47153, + "ĠVie": 47154, + "Ġcaveats": 47155, + "Ġalas": 47156, + "ifled": 47157, + "abuse": 47158, + "661": 47159, + "Ġibn": 47160, + "Ġgul": 47161, + "Ġrobbing": 47162, + "til": 47163, + "ILA": 47164, + "Ġmitigating": 47165, + "Ġaptly": 47166, + "Ġtyrant": 47167, + "Ġmidday": 47168, + "ĠGilmore": 47169, + "ĠDecker": 47170, + "Ġ§§": 47171, + "partial": 47172, + "Exactly": 47173, + "Ġphenotype": 47174, + "Ġ[+]": 47175, + "ĠPlex": 47176, + "ĠIps": 47177, + "versions": 47178, + "Ġebook": 47179, + "Ġchic": 47180, + "gross": 47181, + "\":\"\"},{\"": 47182, + "ĠSurprisingly": 47183, + "Morgan": 47184, + "Ġresidues": 47185, + "ĠConfederation": 47186, + "infeld": 47187, + "Ġlyr": 47188, + "moderate": 47189, + "Ġperpendicular": 47190, + "VK": 47191, + "Ġsynchronized": 47192, + "Ġrefreshed": 47193, + "Ġadore": 47194, + "ĠTorment": 47195, + "olina": 47196, + "Ġ2600": 47197, + "ItemTracker": 47198, + "Ġpies": 47199, + "ĠFAT": 47200, + "ĠRHP": 47201, + "048": 47202, + "ĠRESP": 47203, + "ĠBJ": 47204, + "allows": 47205, + "Pand": 47206, + "Ġunwelcome": 47207, + "ĠVoc": 47208, + "ĠBastard": 47209, + "ĠOW": 47210, + "ĠLAR": 47211, + "ĠHealer": 47212, + "Environmental": 47213, + "ĠKenyan": 47214, + "ĠTrance": 47215, + "ĠPats": 47216, + "Ġaliases": 47217, + "ĠGarfield": 47218, + "Ġcampaigner": 47219, + "Ġadvancements": 47220, + "ĠOkinawa": 47221, + "ĠCoh": 47222, + "owsky": 47223, + "Ġstarved": 47224, + "Ġsizeable": 47225, + "Ġ:-)": 47226, + "ĠmRNA": 47227, + "Ġsuspensions": 47228, + "istar": 47229, + "Scotland": 47230, + "Prin": 47231, + "------------------------------------------------": 47232, + "Ġ502": 47233, + "Ġteaspoons": 47234, + "Ġ1050": 47235, + "Ġcoercive": 47236, + "ĠMasonic": 47237, + "edded": 47238, + "ĠPassenger": 47239, + "Ġlatt": 47240, + "Ġbraces": 47241, + "ĠSteal": 47242, + "ĠNYT": 47243, + "ĠKats": 47244, + "ĠCelest": 47245, + "aez": 47246, + "Tu": 47247, + "ĠCoulter": 47248, + "ðŁĺ": 47249, + "Flickr": 47250, + "ĠWilmington": 47251, + "iths": 47252, + "++;": 47253, + "Ġvending": 47254, + "Ġnegro": 47255, + "ĠPhi": 47256, + "ĠYellowstone": 47257, + "Callback": 47258, + "Ġshampoo": 47259, + "ĠShades": 47260, + "wat": 47261, + "Ġsuperhuman": 47262, + "Ġridiculed": 47263, + "Ġholiest": 47264, + "ombo": 47265, + "Ġinterns": 47266, + "Ġhone": 47267, + "ĠParagu": 47268, + "URI": 47269, + "Ġdangling": 47270, + "ãĤ»": 47271, + "sov": 47272, + "ictional": 47273, + "availability": 47274, + "Ġrevocation": 47275, + "Ġdow": 47276, + "inic": 47277, + "ĠTHEIR": 47278, + "Ġiso": 47279, + "Ġoutings": 47280, + "ĠLethal": 47281, + "Ġ)))": 47282, + "Ġinaccur": 47283, + "Ġoutlandish": 47284, + "Ġanus": 47285, + "letico": 47286, + "idon": 47287, + "lol": 47288, + "Ġunregulated": 47289, + "Ġsuccumbed": 47290, + "Ġcuff": 47291, + "ĠWasteland": 47292, + "letal": 47293, + "Ġsubstr": 47294, + "Ġcoffers": 47295, + "Ġautomakers": 47296, + "ovi": 47297, + "ĠXue": 47298, + "ĠDaytona": 47299, + "Ġjarring": 47300, + "Ġfumes": 47301, + "Ġdisbanded": 47302, + "zik": 47303, + "itton": 47304, + "Ġstrikingly": 47305, + "Ġspores": 47306, + "Adapter": 47307, + ".):": 47308, + "ĠLyndon": 47309, + "ivalry": 47310, + "Ġorally": 47311, + "Ġtumultuous": 47312, + "Ġdispleasure": 
47313, + "Ġcones": 47314, + "orrect": 47315, + "Ġappease": 47316, + "Ġderby": 47317, + "ĠTripoli": 47318, + "ĠAless": 47319, + "Ġpoked": 47320, + "ĠGuilty": 47321, + "vP": 47322, + "Enough": 47323, + "Ġoriginals": 47324, + "699": 47325, + "Ġrabbi": 47326, + "Ġproverbial": 47327, + "Ġpostpone": 47328, + "elope": 47329, + "ĠMisty": 47330, + "Ġstaffed": 47331, + "ĠUnemployment": 47332, + "reditary": 47333, + "Ġdiligent": 47334, + "recomm": 47335, + "measures": 47336, + "asin": 47337, + "825": 47338, + "Ġponds": 47339, + "Ġmmol": 47340, + "ĠSAR": 47341, + "ĠCARE": 47342, + "Ġ371": 47343, + "Ġclenched": 47344, + "ĠCorsair": 47345, + "Ġcaricature": 47346, + "zn": 47347, + "attach": 47348, + "ĠSchro": 47349, + "speak": 47350, + "painted": 47351, + "ĠSuc": 47352, + "ĠENT": 47353, + "Ġcellul": 47354, + "ĠPaid": 47355, + "diagn": 47356, + "WHERE": 47357, + "Ġtexted": 47358, + "Barn": 47359, + "Ġretracted": 47360, + "ĠReferred": 47361, + "Sav": 47362, + "Ġupkeep": 47363, + "Ġworkplaces": 47364, + "ĠTokens": 47365, + "Ġamplify": 47366, + "clinical": 47367, + "Ġmultic": 47368, + "mberg": 47369, + "Ġconvoluted": 47370, + "Region": 47371, + "565": 47372, + "ĠTopic": 47373, + "Ġsnail": 47374, + "Ġsaline": 47375, + "Ġinsurrection": 47376, + "ĠPetr": 47377, + "forts": 47378, + "BAT": 47379, + "ĠNavajo": 47380, + "Ġrudimentary": 47381, + "ĠLaksh": 47382, + "ONDON": 47383, + "Measure": 47384, + "Ġtransformer": 47385, + "ĠGoddard": 47386, + "Ġcoincides": 47387, + "irin": 47388, + "Rex": 47389, + "ĠBok": 47390, + "quit": 47391, + "Ġshotguns": 47392, + "Ġproletarian": 47393, + "Ġscorp": 47394, + "ĠAda": 47395, + "514": 47396, + "Ġslander": 47397, + "recorded": 47398, + "Ġembell": 47399, + "risome": 47400, + "Ġapologizing": 47401, + "ĠMulcair": 47402, + "ĠGibraltar": 47403, + "Cla": 47404, + "Ġallot": 47405, + "ĠAttention": 47406, + "Ġ433": 47407, + "leave": 47408, + "Ġwhine": 47409, + "ĠIssa": 47410, + "ĠFaust": 47411, + "ĠBarron": 47412, + "heny": 47413, + "Ġvictimized": 47414, + "Jews": 47415, + "Ġnurturing": 47416, + "ettel": 47417, + "Winged": 47418, + "ĠSubtle": 47419, + "Ġflavorful": 47420, + "ĠReps": 47421, + "enged": 47422, + "callback": 47423, + "Ġdirectional": 47424, + "Ġclasp": 47425, + "ĠDirections": 47426, + "planet": 47427, + "iculture": 47428, + "Helper": 47429, + "icion": 47430, + "acia": 47431, + "Ġç¥ŀ": 47432, + "Ġsurges": 47433, + "Ġcanoe": 47434, + "ĠPremiership": 47435, + "been": 47436, + "Ġdefied": 47437, + "ĠTrooper": 47438, + "Ġtripod": 47439, + "Ġgasp": 47440, + "ĠEuph": 47441, + "ĠAds": 47442, + "vernight": 47443, + "highly": 47444, + "Role": 47445, + "Ġentangled": 47446, + "ĠZeit": 47447, + "618": 47448, + "ĠRusty": 47449, + "Ġhavens": 47450, + "ĠVaughan": 47451, + "HAEL": 47452, + "ĠSERVICE": 47453, + "/,": 47454, + "Ġstricken": 47455, + "Ġdelusions": 47456, + "Ġbis": 47457, + "ĠHaf": 47458, + "Ġgratification": 47459, + "Ġenticing": 47460, + "UNCH": 47461, + "Adams": 47462, + "ĠOLED": 47463, + "ĠBeetle": 47464, + "Ġ1899": 47465, + "ĠSOFTWARE": 47466, + "ategor": 47467, + "VL": 47468, + "ĠTotem": 47469, + "ĠGators": 47470, + "ATURES": 47471, + "Ġimpedance": 47472, + "Registered": 47473, + "ĠCary": 47474, + "ĠAerial": 47475, + "onne": 47476, + "enium": 47477, + "Ġdred": 47478, + "ĠBeg": 47479, + "Ġconcurrently": 47480, + "Ġsuperpower": 47481, + "ĠXan": 47482, + "jew": 47483, + "imester": 47484, + "ĠDickinson": 47485, + "âĶģ": 47486, + "Fla": 47487, + "Ġpree": 47488, + "ĠRollins": 47489, + "©¶æ": 47490, + "Ġdenomination": 47491, + "ĠLana": 47492, + "516": 47493, + "Ġinciting": 47494, + 
"scribed": 47495, + "juries": 47496, + "ĠWonders": 47497, + "approximately": 47498, + "Ġsuspending": 47499, + "Ġmountainous": 47500, + "ĠLaugh": 47501, + "oidal": 47502, + "Ns": 47503, + "Detect": 47504, + ")=": 47505, + "ĠLuthor": 47506, + "ĠSchwarzenegger": 47507, + "ĠMuller": 47508, + "ĠDevi": 47509, + "ecycle": 47510, + "Jar": 47511, + "613": 47512, + "ĠLongh": 47513, + "Bah": 47514, + "ĠSPORTS": 47515, + "nw": 47516, + "Ġrefinement": 47517, + "Ġwaterways": 47518, + "Ġdiner": 47519, + "Blade": 47520, + "683": 47521, + "Fac": 47522, + "Ġinitials": 47523, + "Ġrog": 47524, + "Ġparanormal": 47525, + "BUT": 47526, + "Ġ[(": 47527, + "ĠSwanson": 47528, + "ĠMesh": 47529, + "âĸ¬": 47530, + "Improve": 47531, + "ĠRadiation": 47532, + "ĠEsther": 47533, + "ĠEsk": 47534, + "ĠAly": 47535, + "iky": 47536, + "Ġirrad": 47537, + "ĠBuckingham": 47538, + "Ġrefill": 47539, + "Ġ._": 47540, + "Repe": 47541, + "CONCLUS": 47542, + "Ġdifferentiated": 47543, + "Ġchirop": 47544, + "ĠAtkins": 47545, + "Pattern": 47546, + "Ġexcise": 47547, + "Ġcabal": 47548, + "NSA": 47549, + "ĠSTA": 47550, + "ĠSIL": 47551, + "ĠParaly": 47552, + "Ġrye": 47553, + "ĠHowell": 47554, + "ĠCountdown": 47555, + "nesses": 47556, + "alysed": 47557, + "Ġresize": 47558, + "ãĤ½": 47559, + "Ġbudgetary": 47560, + "ĠStras": 47561, + "wang": 47562, + "Ġapiece": 47563, + "Ġprecincts": 47564, + "Ġpeach": 47565, + "Ġskyline": 47566, + "Ġ353": 47567, + "popular": 47568, + "Appearances": 47569, + "ĠMechanics": 47570, + "ĠDevOnline": 47571, + "Sullivan": 47572, + "Zen": 47573, + "Ġpu": 47574, + "opolis": 47575, + "544": 47576, + "Ġdeform": 47577, + "Ġcounteract": 47578, + "ĠLange": 47579, + "Ġ417": 47580, + "Console": 47581, + "774": 47582, + "Ġnodding": 47583, + "Ġpopulism": 47584, + "Ġhep": 47585, + "Ġcounselling": 47586, + "compliance": 47587, + "UFF": 47588, + "Ġundeniably": 47589, + "Ġrailing": 47590, + "ĠHorowitz": 47591, + "ĠSimone": 47592, + "ĠBungie": 47593, + "Ġak": 47594, + "ĠTalks": 47595, + "xff": 47596, + "flake": 47597, + "Crash": 47598, + "Ġsweaty": 47599, + "Ġbanquet": 47600, + "ĠOFFIC": 47601, + "Ġinventive": 47602, + "Ġastronomer": 47603, + "ĠStamford": 47604, + "ĠScare": 47605, + "ĠGREEN": 47606, + "olicited": 47607, + "Ġrusher": 47608, + "Ġcentrist": 47609, + "ighting": 47610, + "Ġsubclass": 47611, + "Ġdisav": 47612, + "Ġdefund": 47613, + "ĠNanto": 47614, + "ociate": 47615, + "mast": 47616, + "Ġpacif": 47617, + "Ġmend": 47618, + "eers": 47619, + "immigration": 47620, + "ESSION": 47621, + "Ġnumbering": 47622, + "Ġlaughable": 47623, + "ĠEnded": 47624, + "viation": 47625, + "emark": 47626, + "Pitt": 47627, + "Ġmeticulous": 47628, + "ĠLF": 47629, + "Ġcongratulated": 47630, + "ĠBirch": 47631, + "Ġswayed": 47632, + "Ġsemifinals": 47633, + "Ġhumankind": 47634, + "matter": 47635, + "ĠEquip": 47636, + "opausal": 47637, + "Said": 47638, + "ĠLayout": 47639, + "Ġvoicing": 47640, + "Ġthug": 47641, + "Ġpornographic": 47642, + "IPS": 47643, + "Ġmoaning": 47644, + "Ġgrievance": 47645, + "Ġconfessions": 47646, + "escal": 47647, + "TEXTURE": 47648, + "Authent": 47649, + "osaurus": 47650, + "Purchase": 47651, + "Ġrelegation": 47652, + "alter": 47653, + "Ġ³³": 47654, + "Ġriddled": 47655, + "Ġogre": 47656, + "ĠLowell": 47657, + "Occup": 47658, + "Eat": 47659, + "ĠHyder": 47660, + "ĠAdviser": 47661, + "Commerce": 47662, + "Hunt": 47663, + "ĠOrth": 47664, + "ĠCompetitive": 47665, + "ĠCLA": 47666, + "CDC": 47667, + "Ġsalads": 47668, + "Fle": 47669, + "Ġindustrialized": 47670, + "`,": 47671, + "ĠOWN": 47672, + "Ġbeck": 47673, + "ĠParticularly": 47674, + 
"oubt": 47675, + "ĠmM": 47676, + "ĠHussain": 47677, + "ĠChennai": 47678, + "Ġ920": 47679, + "Ġappointing": 47680, + "ĠCullen": 47681, + ",,,,,,,,": 47682, + "Ġpores": 47683, + "verified": 47684, + "Ġbiochemical": 47685, + "emate": 47686, + "Ġcowardly": 47687, + "ĠHelsinki": 47688, + "ĠEthiopian": 47689, + "SOURCE": 47690, + "ERC": 47691, + "estro": 47692, + "Ġbiotech": 47693, + "ĠSour": 47694, + "Ġbrewer": 47695, + "Bloomberg": 47696, + "Ġintensify": 47697, + "Glass": 47698, + "anco": 47699, + "ĠFDR": 47700, + "greSQL": 47701, + "ĠFires": 47702, + "©¶æ¥µ": 47703, + "eco": 47704, + "1001": 47705, + "ĠHomeless": 47706, + "Ġinstantaneous": 47707, + "ĠHaste": 47708, + "igel": 47709, + "Diamond": 47710, + "Ġpaving": 47711, + "Ġlandfill": 47712, + "Ġdads": 47713, + "houn": 47714, + ":]": 47715, + "Ġincendiary": 47716, + "ĠLivingston": 47717, + "ĠHilbert": 47718, + "ĠChecks": 47719, + "styles": 47720, + "inators": 47721, + "ĠClive": 47722, + "phrine": 47723, + "Ġchimpanzees": 47724, + "Ġpall": 47725, + "ĠJM": 47726, + "ĠAadhaar": 47727, + "ðĿ": 47728, + "Ġachievable": 47729, + "disabled": 47730, + "PET": 47731, + "OOOOOOOO": 47732, + "Mot": 47733, + "Ġintangible": 47734, + "Ġballet": 47735, + "ĠWebs": 47736, + "ĠEstimated": 47737, + "Effects": 47738, + "Ġbailed": 47739, + "Joshua": 47740, + "Ġturbulence": 47741, + "Ġoccupant": 47742, + "ĠDaylight": 47743, + "Ġ361": 47744, + "meet": 47745, + "Ġstatically": 47746, + "Ġonlook": 47747, + "Ġki": 47748, + "illegal": 47749, + "Ġvelvet": 47750, + "Ġdehydration": 47751, + "Ġacquies": 47752, + "ĠRez": 47753, + "akura": 47754, + "ĠUpton": 47755, + "atro": 47756, + "Ġincomprehensible": 47757, + "Ġbackdoor": 47758, + "ĠRhino": 47759, + "727": 47760, + "Ġmaths": 47761, + ")+": 47762, + "Ġheresy": 47763, + "Ġdf": 47764, + "ĠRoche": 47765, + "ĠLydia": 47766, + "Ġpancreat": 47767, + "reply": 47768, + "arrell": 47769, + "Ġsolicitation": 47770, + "Ġcircadian": 47771, + "BIP": 47772, + "Ġforay": 47773, + "Ġcryptic": 47774, + "izu": 47775, + "imeo": 47776, + "ĠTomato": 47777, + "ĠHoms": 47778, + "examination": 47779, + "Ġquarry": 47780, + "ĠValiant": 47781, + "ĠJericho": 47782, + "ĠINCLUD": 47783, + "Ġ1840": 47784, + "519": 47785, + "Ġresists": 47786, + "Ġsnapshots": 47787, + "ĠSpur": 47788, + "ĠAntiqu": 47789, + "Login": 47790, + "Ġbestselling": 47791, + "Ġantic": 47792, + "ĠSutherland": 47793, + "ãĤ¢ãĥ«": 47794, + "Ġ~/": 47795, + "ĠParm": 47796, + "èĥ": 47797, + "Pages": 47798, + "intensity": 47799, + "Ġimmobil": 47800, + "Ġ1865": 47801, + "zzo": 47802, + "Ġnifty": 47803, + "Ġfentanyl": 47804, + "ĠPreservation": 47805, + "ophen": 47806, + "Ġdarts": 47807, + "ĠDinosaur": 47808, + "pointers": 47809, + "ĠRite": 47810, + "suggest": 47811, + "awareness": 47812, + "ĠSheridan": 47813, + "Ġstances": 47814, + "Ġsorcery": 47815, + "Ġperjury": 47816, + "ĠNikola": 47817, + "iever": 47818, + "Ġfiance": 47819, + "ĠJordanian": 47820, + "ĠBalloon": 47821, + "Ġnab": 47822, + "Ġkb": 47823, + "Ġhumanities": 47824, + "ĠTanaka": 47825, + "hillary": 47826, + "Ġconsultancy": 47827, + "ĠZub": 47828, + "Ġremission": 47829, + "Ġconfid": 47830, + "CHQ": 47831, + "ĠFug": 47832, + "Ġimprovis": 47833, + "Yep": 47834, + "/_": 47835, + "Ġunwillingness": 47836, + "Ġportfolios": 47837, + "055": 47838, + "ĠInstructor": 47839, + "aiman": 47840, + "Ġclaimants": 47841, + "Mbps": 47842, + "ĠBye": 47843, + "received": 47844, + "Tweet": 47845, + "Ġindemn": 47846, + "riz": 47847, + "amara": 47848, + "Nat": 47849, + "Ġevaluates": 47850, + "ĠLur": 47851, + "epad": 47852, + "FOX": 47853, + "ĠThro": 47854, + 
"Ġrusty": 47855, + "Ġbedrock": 47856, + "ĠOprah": 47857, + "JB": 47858, + "Ġmanipulative": 47859, + "Ġwillful": 47860, + "Ġrelapse": 47861, + "Ġextant": 47862, + "Theme": 47863, + "Sensor": 47864, + "ĠStability": 47865, + "govern": 47866, + "Ġpoppy": 47867, + "Ġknack": 47868, + "Ġinsulated": 47869, + "ĠTile": 47870, + "ĠExtrem": 47871, + "Ġuntold": 47872, + "Ġconverge": 47873, + "Ġrefuel": 47874, + "igroup": 47875, + "Ġdistortions": 47876, + "Ġravaged": 47877, + "Ġmechanically": 47878, + "ĠReilly": 47879, + "ĠNose": 47880, + "ĠIncarnation": 47881, + "ĠBecky": 47882, + "abbling": 47883, + "Ġtaco": 47884, + "Ġrake": 47885, + "Ġmelancholy": 47886, + "Ġillustrious": 47887, + "ĠDartmouth": 47888, + "Guide": 47889, + "ĠRazer": 47890, + "ĠBenz": 47891, + "Ultimate": 47892, + "ĠSurprise": 47893, + "Ġpageant": 47894, + "offer": 47895, + "Whoever": 47896, + "Ġwiser": 47897, + "Ġchemist": 47898, + "ĠHELL": 47899, + "ĠBulk": 47900, + "Ġplutonium": 47901, + "ĠCOVER": 47902, + "Ö¼": 47903, + "failed": 47904, + "Ġtirelessly": 47905, + "Ġinfertility": 47906, + "ĠTrident": 47907, + "ĠShowtime": 47908, + "ĠCiv": 47909, + "Vice": 47910, + "requires": 47911, + "ittance": 47912, + "Ġuncontrolled": 47913, + "interesting": 47914, + "561": 47915, + "Ġinnovate": 47916, + "ategic": 47917, + "Lie": 47918, + "ĠSelling": 47919, + "Ul": 47920, + "Ġsavior": 47921, + "ĠTosh": 47922, + "Ġswast": 47923, + "PASS": 47924, + "Ġrink": 47925, + "Ġcardio": 47926, + "ĠIro": 47927, + "udi": 47928, + "Ġvantage": 47929, + "Ġvans": 47930, + "ĠNiño": 47931, + "+=": 47932, + "Ġpropagate": 47933, + "": 49029, + "Ġleukemia": 49030, + "Ġeluc": 49031, + "Ġannouncer": 49032, + "ĠLithuan": 49033, + "ĠArmageddon": 49034, + "åĩ": 49035, + "Lenin": 49036, + "ĠRuk": 49037, + "Ġpepp": 49038, + "ĠRomantic": 49039, + "ĠPIT": 49040, + "ĠInterstellar": 49041, + "ĠAtkinson": 49042, + "Raid": 49043, + "Js": 49044, + "Goal": 49045, + "Course": 49046, + "Ġvanishing": 49047, + "esley": 49048, + "ĠRounds": 49049, + "Elsa": 49050, + "593": 49051, + "Ġredundancy": 49052, + "ĠSTAND": 49053, + "Ġprophetic": 49054, + "Ġhabitable": 49055, + "ryu": 49056, + "Ġfaintly": 49057, + "MODE": 49058, + "Ġflanked": 49059, + "IRC": 49060, + "Awesome": 49061, + "Ġspurious": 49062, + "ĠZah": 49063, + "ĠMSG": 49064, + "Ġshading": 49065, + "Ġmotivational": 49066, + "ĠSantana": 49067, + "ĠSPR": 49068, + "Ġexcruciating": 49069, + "omial": 49070, + "ĠMiko": 49071, + "ĠLeopard": 49072, + "Abyss": 49073, + "Ġ[|": 49074, + "dirty": 49075, + "Ġbaths": 49076, + "Ġdemoral": 49077, + "andre": 49078, + "PB": 49079, + "Ġunification": 49080, + "Ġsacrament": 49081, + "Ġ[&": 49082, + "Ġpriceless": 49083, + "Ġgelatin": 49084, + "Ġemanating": 49085, + "ĠAllaah": 49086, + "986": 49087, + "Ġoutburst": 49088, + "Ġeras": 49089, + "ĠXVI": 49090, + "ĠSPI": 49091, + "Ott": 49092, + "ĠLazarus": 49093, + "PLIED": 49094, + "Flying": 49095, + "blogs": 49096, + "Wisconsin": 49097, + "Raven": 49098, + "Ġrebate": 49099, + "Ġcreeps": 49100, + "ĠSpan": 49101, + "ĠPainter": 49102, + "ĠKira": 49103, + "ĠAmos": 49104, + "ĠCorvette": 49105, + "Consumer": 49106, + "ĠRecover": 49107, + "cki": 49108, + "Ġpesky": 49109, + "ĠInvention": 49110, + "Companies": 49111, + "Ġchallengers": 49112, + "ademic": 49113, + "ĠUkrainians": 49114, + "ĠNeurolog": 49115, + "ĠForsaken": 49116, + "Ġentrants": 49117, + "Ġembattled": 49118, + "Ġdefunct": 49119, + "ĠGlacier": 49120, + "Ġpoisons": 49121, + "ĠHorses": 49122, + "makes": 49123, + "ĠDirt": 49124, + "Ġ423": 49125, + "hhh": 49126, + "ĠTransformation": 49127, + "QUIRE": 49128, + 
"..................": 49129, + "Ġtraveller": 49130, + "ĠSexy": 49131, + "ĠKern": 49132, + "ipolar": 49133, + "Ġransomware": 49134, + "oooooooooooooooo": 49135, + "Ec": 49136, + "ruby": 49137, + "Professional": 49138, + "ĠOutbreak": 49139, + "argument": 49140, + "Grey": 49141, + "ĠFifa": 49142, + "ĠCHO": 49143, + "ĠFORM": 49144, + "ĠAmtrak": 49145, + "-[": 49146, + "Ġcradle": 49147, + "Ġantioxidants": 49148, + "ãģ®å®": 49149, + "736": 49150, + "ĠNASL": 49151, + "ĠContributions": 49152, + "Indiana": 49153, + "ĠSTEP": 49154, + "CSS": 49155, + "Ġsalient": 49156, + "Ġallocations": 49157, + "yrights": 49158, + "Ġmashed": 49159, + "ĠCutter": 49160, + "Sexual": 49161, + "Ġpounded": 49162, + "Ġfanbase": 49163, + "Ġcasc": 49164, + "ĠTransparency": 49165, + "Ġanalytic": 49166, + "ĠSummoner": 49167, + "×ŀ": 49168, + "ĠADC": 49169, + "detail": 49170, + "Ġvanquished": 49171, + "Ġcrabs": 49172, + "arie": 49173, + "Destroy": 49174, + "ĠSack": 49175, + "Ġtransistor": 49176, + "Alabama": 49177, + "ĠKoen": 49178, + "ĠFisheries": 49179, + "cone": 49180, + "Ġannexed": 49181, + "ĠMGM": 49182, + "esa": 49183, + "Ġfaked": 49184, + "ĠCongratulations": 49185, + "Ġhindered": 49186, + "Ġcorrectional": 49187, + "ĠITV": 49188, + "leeve": 49189, + "Ġinappropriately": 49190, + "licks": 49191, + "Ġtrespass": 49192, + "Ġpaws": 49193, + "Ġnegotiator": 49194, + "ĠChristensen": 49195, + "limits": 49196, + "ĠDianne": 49197, + "Ġelegance": 49198, + "ĠContracts": 49199, + "anke": 49200, + "Obj": 49201, + "Ġvigilance": 49202, + "Ġcastles": 49203, + "ĠNAD": 49204, + "ĠHolo": 49205, + "Ġemphatically": 49206, + "ĠTitus": 49207, + "ĠServing": 49208, + "ĠRichie": 49209, + "ĠPigs": 49210, + "568": 49211, + "Ġanimosity": 49212, + "ĠAttributes": 49213, + "ĠUriel": 49214, + "MQ": 49215, + "myra": 49216, + "ĠApplicant": 49217, + "Ġpsychiatrists": 49218, + "ĠVij": 49219, + "ĠAbby": 49220, + "agree": 49221, + "Push": 49222, + "ĠkWh": 49223, + "hiba": 49224, + "Ġincite": 49225, + "ĠWeasley": 49226, + "ĠTaxi": 49227, + "ministic": 49228, + "hyper": 49229, + "ĠFarn": 49230, + "Ġ601": 49231, + "ĠNationwide": 49232, + "Fake": 49233, + "952": 49234, + "Ġmaize": 49235, + "Ġinteracted": 49236, + "Ġtransitioned": 49237, + "Ġparasitic": 49238, + "Ġharmonic": 49239, + "Ġdecaying": 49240, + "Ġbaseless": 49241, + "nsics": 49242, + "Ġtranspired": 49243, + "Ġabundantly": 49244, + "ĠForensic": 49245, + "Ġtreadmill": 49246, + "ĠJav": 49247, + "aband": 49248, + "Ġsshd": 49249, + "Ġfrontman": 49250, + "ĠJakarta": 49251, + "oller": 49252, + "drops": 49253, + "ĠSERVICES": 49254, + "romptu": 49255, + "ophical": 49256, + "hospital": 49257, + "bledon": 49258, + "645": 49259, + "Ġmidrange": 49260, + "ĠEVENT": 49261, + "culated": 49262, + "rawled": 49263, + "Ġperched": 49264, + "Ġoverboard": 49265, + "ĠPeel": 49266, + "ĠPwr": 49267, + "ĠCarth": 49268, + "ĠCOMPLE": 49269, + "coe": 49270, + "shall": 49271, + "Ġdeterrence": 49272, + "METHOD": 49273, + "ĠAbsent": 49274, + "MEN": 49275, + "Ġsill": 49276, + "ĠLEVEL": 49277, + "York": 49278, + "Ġsinners": 49279, + "ĠOPEC": 49280, + "ĠNur": 49281, + "ĠDesigns": 49282, + "selection": 49283, + "Ġunworthy": 49284, + "CHA": 49285, + "Ġstrengthens": 49286, + "883": 49287, + "edly": 49288, + "Ġslicing": 49289, + "Ġmalnutrition": 49290, + "Ġfilmmaking": 49291, + "ĠPolk": 49292, + "urated": 49293, + "Ġ421": 49294, + "breakers": 49295, + "!'\"": 49296, + "Ġwetlands": 49297, + "ĠDiscrimination": 49298, + "Ġallowable": 49299, + "Ġsteered": 49300, + "ĠSicily": 49301, + "SAM": 49302, + "Ġmustache": 49303, + "Ġmids": 49304, + "Ġclipped": 
49305, + "Ġcirculate": 49306, + "Ġbrittle": 49307, + "ĠBuildings": 49308, + "raised": 49309, + "ĠRoundup": 49310, + "Ġwealthier": 49311, + "Ġoverwrite": 49312, + "Ġoverpowered": 49313, + "ĠGerrard": 49314, + "sites": 49315, + "PDATED": 49316, + "Ġacutely": 49317, + "ĠGamble": 49318, + "Ġpim": 49319, + "ĠKus": 49320, + "Typically": 49321, + "Deploy": 49322, + "ĠMoroccan": 49323, + "potion": 49324, + "combe": 49325, + "Ġvigilante": 49326, + "Ġ363": 49327, + "Stew": 49328, + "ĠBagg": 49329, + "Ġresided": 49330, + "ĠSpo": 49331, + "Ġremnant": 49332, + "Ġemptiness": 49333, + "brainer": 49334, + "Ġoutpatient": 49335, + "priority": 49336, + "Ġleptin": 49337, + "ĠPayton": 49338, + "ĠGleaming": 49339, + "ĠShed": 49340, + "ĠPolo": 49341, + "ĠMormonism": 49342, + "restricted": 49343, + "arlane": 49344, + "wx": 49345, + "Ġcreatine": 49346, + "ĠAnon": 49347, + "ĠSTUD": 49348, + "ĠJUL": 49349, + "ĠTee": 49350, + "528": 49351, + "089": 49352, + "Ġhatched": 49353, + "Dispatch": 49354, + "ĠComposite": 49355, + "Ġ451": 49356, + "puff": 49357, + "ĠXCOM": 49358, + "ĠOrn": 49359, + "ĠTHANK": 49360, + "ENDED": 49361, + "ĠAsheville": 49362, + "ĠÃľ": 49363, + "Ġmango": 49364, + "ĠSlightly": 49365, + "worldly": 49366, + "ĠWander": 49367, + "ĠExpand": 49368, + "ĠChr": 49369, + "Mist": 49370, + "Ġorthodoxy": 49371, + "ĠUNESCO": 49372, + "regate": 49373, + "Elsewhere": 49374, + "kie": 49375, + "irled": 49376, + "Ġtopple": 49377, + "Ġadoptive": 49378, + "ĠLegs": 49379, + "dress": 49380, + "ĠSagan": 49381, + "bare": 49382, + "ĠGlou": 49383, + "Crunch": 49384, + "Ġhelpers": 49385, + "Ġchronically": 49386, + "ĠHuma": 49387, + "10000": 49388, + "Ġaccommodating": 49389, + "äºĶ": 49390, + "Ġwrinkles": 49391, + "Ġdodged": 49392, + "fourth": 49393, + "Ġprecon": 49394, + "Ġcompressor": 49395, + "ĠKare": 49396, + "Ġevict": 49397, + "ĠWarwick": 49398, + "imar": 49399, + "Ġmodernization": 49400, + "Ġbandwagon": 49401, + "Ġrefuted": 49402, + "Ġnetted": 49403, + "ĠNaples": 49404, + "ĠGenie": 49405, + "perors": 49406, + "Ġfielded": 49407, + "Ġdere": 49408, + "ĠParables": 49409, + "lees": 49410, + "Ġtrout": 49411, + "aspers": 49412, + "Ġnihil": 49413, + "Ġhappiest": 49414, + "Ġfloppy": 49415, + "ĠLoft": 49416, + "ĠHeard": 49417, + "Ġunison": 49418, + "Ġlug": 49419, + "ĠRedmond": 49420, + "classic": 49421, + "Supporters": 49422, + "SHIP": 49423, + "GMT": 49424, + "Ġfuelled": 49425, + "çIJ": 49426, + "Ġdd": 49427, + "ĠEminem": 49428, + "Ġ1897": 49429, + "NYSE": 49430, + "Ġsecretaries": 49431, + "ĠFIA": 49432, + "ĠCanaveral": 49433, + "Favorite": 49434, + "Ġpomp": 49435, + "Ġdetainee": 49436, + "ership": 49437, + "aimon": 49438, + "iour": 49439, + "ĠApex": 49440, + "Ġplantations": 49441, + "amia": 49442, + "acion": 49443, + "Rust": 49444, + "Ġtowed": 49445, + "ĠTruly": 49446, + "577": 49447, + "Ġsheltered": 49448, + "rider": 49449, + "Wo": 49450, + "Ġlair": 49451, + "ĠIntelligent": 49452, + "improve": 49453, + "matically": 49454, + "Ġetiquette": 49455, + "adra": 49456, + "allo": 49457, + "ĠJuno": 49458, + "anything": 49459, + "ĠStruggle": 49460, + "ĠPredict": 49461, + "ĠGrimes": 49462, + "ĠAMERICA": 49463, + "ctx": 49464, + "ĠSituation": 49465, + "WOOD": 49466, + "Ġsoluble": 49467, + "meier": 49468, + "Ġintolerable": 49469, + "angering": 49470, + "Ġuninterrupted": 49471, + "Ġtooltip": 49472, + "Ġinterrogated": 49473, + "Ġgunned": 49474, + "ĠSneak": 49475, + "æѦ": 49476, + "Ġtether": 49477, + "Ġcrumble": 49478, + "Lens": 49479, + "Ġclustered": 49480, + "ĠSyl": 49481, + "ĠHasan": 49482, + "Ġdystopian": 49483, + "wana": 49484, + 
"Ġjoystick": 49485, + "ĠThib": 49486, + "ammu": 49487, + "Tomorrow": 49488, + "546": 49489, + "Ġovercame": 49490, + "Ġminimized": 49491, + "ceptor": 49492, + "Runner": 49493, + "ENGTH": 49494, + "ĠBrenda": 49495, + "ĠAchievements": 49496, + "Ġtorches": 49497, + "Ġrapport": 49498, + "ĠInvestigator": 49499, + "ĠHandling": 49500, + "relation": 49501, + "grey": 49502, + "815": 49503, + "Ġkcal": 49504, + "ĠCommands": 49505, + "dq": 49506, + "Ġcurls": 49507, + "Ġbearer": 49508, + "Ġcynicism": 49509, + "itri": 49510, + "ĠUseful": 49511, + "Bee": 49512, + "DCS": 49513, + "Ġabras": 49514, + "Pract": 49515, + "BILITIES": 49516, + "712": 49517, + "Ġdebugger": 49518, + "Ġdebtor": 49519, + "ĠLia": 49520, + "ĠKers": 49521, + "Ġexacerbate": 49522, + "ĠStacy": 49523, + "ĠBland": 49524, + "ĠScenes": 49525, + "Ġbranching": 49526, + "âĸĪâĸĪâĸĪâĸĪâĸĪâĸĪâĸĪâĸĪ": 49527, + "apeake": 49528, + "Ġsalsa": 49529, + "Ġmishand": 49530, + "ĠKonami": 49531, + "ĠNib": 49532, + "Ġanecdote": 49533, + "Ġagreeable": 49534, + "Ïī": 49535, + "ĠNathaniel": 49536, + "ĠHeisman": 49537, + "ĠBeware": 49538, + "Ġ1886": 49539, + "spective": 49540, + "691": 49541, + "522": 49542, + "Ġinhibits": 49543, + "Ġhashing": 49544, + "Ġ1889": 49545, + "å°Ĩ": 49546, + "vich": 49547, + "Pure": 49548, + "Ġsolidly": 49549, + "Ġaspirin": 49550, + "imaru": 49551, + "Ġstreetcar": 49552, + "ĠUCS": 49553, + "ĠJudd": 49554, + "Ġflashbacks": 49555, + "pins": 49556, + "Ġ1440": 49557, + "ĠUNHCR": 49558, + "ĠSymptoms": 49559, + "TIT": 49560, + "538": 49561, + "Fra": 49562, + "%);": 49563, + "Ġooz": 49564, + "Ġcurfew": 49565, + "Ġcalmed": 49566, + "Ġparticipates": 49567, + "TeX": 49568, + "Ġnonsensical": 49569, + "Ġfullback": 49570, + "ĠDeL": 49571, + "monkey": 49572, + "hari": 49573, + "Ġmetabolites": 49574, + "Ġlooted": 49575, + "ĠALWAYS": 49576, + "ĠBCC": 49577, + "Lt": 49578, + "ochet": 49579, + "Bone": 49580, + "Ġvetoed": 49581, + "Ġgcc": 49582, + "ĠCLICK": 49583, + "Ġ1888": 49584, + "saf": 49585, + "Ġstiffness": 49586, + "Ġlowly": 49587, + "ĠGeh": 49588, + "verson": 49589, + "orset": 49590, + "Ġunforeseen": 49591, + "Ġanesthesia": 49592, + "ĠOptical": 49593, + "Ġreconstructed": 49594, + "ĠTup": 49595, + "shows": 49596, + "NEWS": 49597, + "ĠNewspaper": 49598, + "ĠASA": 49599, + "tera": 49600, + "Numbers": 49601, + "Ġinexplicable": 49602, + "×ij": 49603, + "Ġhardness": 49604, + "untarily": 49605, + "ĠAcer": 49606, + "gradient": 49607, + "ARDIS": 49608, + "Ġwoodland": 49609, + "Ġmetaphors": 49610, + "ĠWembley": 49611, + "ĠPavel": 49612, + "philis": 49613, + "Ġrewriting": 49614, + "Ġperceptual": 49615, + "Ġ1070": 49616, + "worms": 49617, + "ĠDowns": 49618, + "Ġunsurprisingly": 49619, + "Ġtagging": 49620, + "flame": 49621, + "Ġlitres": 49622, + "Ġbounces": 49623, + "ĠBabe": 49624, + "shut": 49625, + "Ġoverdoses": 49626, + "ĠSheila": 49627, + "ĠChau": 49628, + "ĠBless": 49629, + "Capture": 49630, + "ĠSignificant": 49631, + "ĠScion": 49632, + "Ġ389": 49633, + "ĠMcH": 49634, + "ĠTitanium": 49635, + "ĠMeal": 49636, + "ameda": 49637, + "agents": 49638, + "aggressive": 49639, + "Billy": 49640, + "763": 49641, + "ĠSaying": 49642, + "DERR": 49643, + "itone": 49644, + "Collins": 49645, + "Bound": 49646, + "Ġbolted": 49647, + "ĠDMCA": 49648, + "953": 49649, + "Ġuniqueness": 49650, + "Ġepigen": 49651, + "unci": 49652, + "antam": 49653, + "Ġreckoning": 49654, + "chairs": 49655, + "OGR": 49656, + "ĠSenegal": 49657, + "Ġ1862": 49658, + "relevant": 49659, + "Ġ¯": 49660, + "Ġpharmacies": 49661, + "ĠGeral": 49662, + "vier": 49663, + "Yan": 49664, + "ORPG": 49665, + "Ġrabid": 
49666, + "bending": 49667, + "ĠUNITED": 49668, + "Ġ465": 49669, + "Assembly": 49670, + "Ġweep": 49671, + "Ġbehest": 49672, + "ĠMothers": 49673, + "ĠJace": 49674, + "hid": 49675, + "Ġwhirlwind": 49676, + "ĠUNIVERS": 49677, + "Ġutopian": 49678, + "Ġkidnap": 49679, + "Philipp": 49680, + "Kin": 49681, + "893": 49682, + "Ġlivestream": 49683, + "ĠMISS": 49684, + "Ġsubversive": 49685, + "ĠTechniques": 49686, + "ĠJUSTICE": 49687, + "ĠBASE": 49688, + "Ġ387": 49689, + "Ġassailants": 49690, + "ĠHardcore": 49691, + "Ġsprinkled": 49692, + "ĠPse": 49693, + "éļ": 49694, + "printed": 49695, + "ĠHau": 49696, + "ORGE": 49697, + "ĠTOUR": 49698, + "Ġlaced": 49699, + "Ġitch": 49700, + "Giving": 49701, + "Ġported": 49702, + "781": 49703, + "////////////////////////////////": 49704, + "breeding": 49705, + "Ġlogger": 49706, + "ĠHOL": 49707, + "innie": 49708, + "Firstly": 49709, + "Ġembryonic": 49710, + "Ġdelegated": 49711, + "pai": 49712, + "OIL": 49713, + "Ġcentrally": 49714, + "ĠRx": 49715, + "ĠScouting": 49716, + "Dutch": 49717, + "Ġhereditary": 49718, + "ĠCruiser": 49719, + "sat": 49720, + "529": 49721, + "ĠMarriott": 49722, + "othermal": 49723, + "Ġprohibitions": 49724, + "Earn": 49725, + "ĠStab": 49726, + "ĠColleges": 49727, + "ĠBelief": 49728, + "stretched": 49729, + "ĠLH": 49730, + "ĠEntityItem": 49731, + "CIA": 49732, + "Ġunrem": 49733, + "Ġlaureate": 49734, + "Ġdenominations": 49735, + "summary": 49736, + "hler": 49737, + "Spect": 49738, + "ĠKlaus": 49739, + "ĠBeans": 49740, + "Ġinsur": 49741, + "ĠPAX": 49742, + "Ġfielder": 49743, + "ĠVet": 49744, + "ĠSparrow": 49745, + "zie": 49746, + "ĠSQ": 49747, + "ĠMondays": 49748, + "ĠOffline": 49749, + "ĠLerner": 49750, + "ĠExtensions": 49751, + "Ireland": 49752, + "Ġpatronage": 49753, + "Ġcontrasted": 49754, + "ĠMania": 49755, + "hirt": 49756, + "Moscow": 49757, + "Ġcondemns": 49758, + "ĠAnge": 49759, + "Ġcomposing": 49760, + "ĠPepe": 49761, + "ĠPaddock": 49762, + "Ġheterogeneity": 49763, + "Ġideologically": 49764, + "Ġfishes": 49765, + "Ġcursing": 49766, + "ĠRutherford": 49767, + "ĠFloating": 49768, + "ĠAmelia": 49769, + "Tea": 49770, + "Synopsis": 49771, + "Ġstunts": 49772, + "Ġbead": 49773, + "Ġstocking": 49774, + "ĠMILL": 49775, + "obook": 49776, + "massive": 49777, + "\\<": 49778, + "Ġhump": 49779, + "ĠPreferences": 49780, + "EngineDebug": 49781, + "geist": 49782, + "ĠNieto": 49783, + "omever": 49784, + "ishy": 49785, + "evaluate": 49786, + "colonial": 49787, + "Alternative": 49788, + "ĠGoPro": 49789, + "ĠVortex": 49790, + "ĠNETWORK": 49791, + "ansky": 49792, + "Secure": 49793, + "ĠThrust": 49794, + "Snake": 49795, + "Ġparcels": 49796, + "Ġsamurai": 49797, + "Ġactresses": 49798, + "Nap": 49799, + "MF": 49800, + "iferation": 49801, + "Beer": 49802, + "523": 49803, + "ĠIly": 49804, + "ointment": 49805, + "Ping": 49806, + "Ġstriped": 49807, + "ĠMellon": 49808, + "ossession": 49809, + "Ġneutron": 49810, + "endium": 49811, + "Ġaph": 49812, + "ĠFlavoring": 49813, + "Ġ383": 49814, + "Ġresponsiveness": 49815, + "ĠJindal": 49816, + "ĠHitchcock": 49817, + "Denver": 49818, + "ĠDRAGON": 49819, + "smanship": 49820, + "ĠDupl": 49821, + "Ġsly": 49822, + "Ġwebcam": 49823, + "ĠTwain": 49824, + "ĠDarling": 49825, + "iliate": 49826, + "consumer": 49827, + "DIT": 49828, + "Ġnamesake": 49829, + "Ġunorthodox": 49830, + "Ġfuner": 49831, + "ĠPLoS": 49832, + "ĠCONTROL": 49833, + "ozyg": 49834, + "oglobin": 49835, + "FACE": 49836, + "ERG": 49837, + "ĠDia": 49838, + "ĠFiesta": 49839, + "cele": 49840, + "034": 49841, + "Ġenclave": 49842, + "âĸ¬âĸ¬": 49843, + "onement": 49844, + 
"alist": 49845, + "Mand": 49846, + "Ġhomegrown": 49847, + "ĠFancy": 49848, + "Ġconceptions": 49849, + "ĠContains": 49850, + "ureen": 49851, + "Ġreiterate": 49852, + "Ġmeager": 49853, + "Ġinstallments": 49854, + "Spawn": 49855, + "627": 49856, + "Ġphotoc": 49857, + "ĠCabrera": 49858, + "ĠRosenthal": 49859, + "ĠLansing": 49860, + "isner": 49861, + "Ġinvests": 49862, + "ĠUFOs": 49863, + "EXP": 49864, + "Hardware": 49865, + "Ġtragically": 49866, + "Ġconcedes": 49867, + "ieft": 49868, + "cham": 49869, + "borgh": 49870, + "ĠSchr": 49871, + "ĠMelanie": 49872, + "ĠHoy": 49873, + "Ġvisitation": 49874, + "Ġidiosyncr": 49875, + "Ġfractions": 49876, + "Ġforeskin": 49877, + "obos": 49878, + "Ġpoaching": 49879, + "ĠVIEW": 49880, + "Ġstimulates": 49881, + "ĠGork": 49882, + "canon": 49883, + "MIC": 49884, + "ĠNemesis": 49885, + "ĠIndra": 49886, + "ĠDMV": 49887, + "Ġ529": 49888, + "Ġinspecting": 49889, + "Ġgrandma": 49890, + "ĠWhedon": 49891, + "ĠShant": 49892, + "ĠPurg": 49893, + "ikan": 49894, + "ĠTeg": 49895, + "ĠCLR": 49896, + "zac": 49897, + "Victoria": 49898, + "ĠVerify": 49899, + "ionics": 49900, + "Ġpartying": 49901, + "ĠMou": 49902, + "colour": 49903, + "Ġtestimonies": 49904, + "lations": 49905, + "Ġpressuring": 49906, + "hiro": 49907, + "acers": 49908, + "Ġfid": 49909, + "angler": 49910, + "ĠCSI": 49911, + "Ġhereafter": 49912, + "Ġdissidents": 49913, + "reporting": 49914, + "iphany": 49915, + "chev": 49916, + "Ġsolitude": 49917, + "Ġlobe": 49918, + "Ġindis": 49919, + "Ġcredential": 49920, + "recent": 49921, + "adult": 49922, + "ĠNirvana": 49923, + "ĠFranchise": 49924, + "Layer": 49925, + "Hyp": 49926, + "ĠBerkshire": 49927, + "Ġwills": 49928, + "tif": 49929, + "Ġtotem": 49930, + "ĠJudah": 49931, + "repair": 49932, + "Instant": 49933, + "548": 49934, + "Ġembassies": 49935, + "Ġbottleneck": 49936, + "Ġbount": 49937, + "Ġtypew": 49938, + "ĠAlvin": 49939, + "jing": 49940, + "imilar": 49941, + "Rush": 49942, + "Ġbrim": 49943, + "ĠHELP": 49944, + "Aim": 49945, + "]'": 49946, + "Ġpassively": 49947, + "Ġbounded": 49948, + "ĠRated": 49949, + "Ġcriminality": 49950, + "Ġbiomark": 49951, + "Ġdispatcher": 49952, + "ĠTowards": 49953, + "Ġ+++": 49954, + "righteous": 49955, + "frog": 49956, + "ĠPanc": 49957, + "Carter": 49958, + "032": 49959, + "æ©Ł": 49960, + "Ġultraviolet": 49961, + "ĠLicensed": 49962, + "ĠTata": 49963, + "ĠBlessing": 49964, + "ĠGAM": 49965, + "Ġchemically": 49966, + "ĠSeaf": 49967, + "ĠRELE": 49968, + "ĠMercenary": 49969, + "capitalist": 49970, + "Ġformulations": 49971, + "Ġannihilation": 49972, + "ĠVerb": 49973, + "ĠArgon": 49974, + "Ġunloaded": 49975, + "Ġmorphed": 49976, + "Ġconquering": 49977, + "backer": 49978, + "IELD": 49979, + "Ġthefts": 49980, + "Ġfrontrunner": 49981, + "ĠRoyale": 49982, + "ĠFundamental": 49983, + "elight": 49984, + "Chip": 49985, + "necessary": 49986, + "ayn": 49987, + "ĠSlip": 49988, + "Ġ448": 49989, + "cerned": 49990, + "Pause": 49991, + "Ġshockingly": 49992, + "ĠABV": 49993, + "Ġcomposure": 49994, + "733": 49995, + "ĠMotorsport": 49996, + "ahime": 49997, + "Murray": 49998, + "Mach": 49999, + "Ġgrids": 50000, + "Ġdebian": 50001, + "Ġfurthermore": 50002, + "Ġdexterity": 50003, + "ĠCollections": 50004, + "oslov": 50005, + "ilage": 50006, + "bj": 50007, + "ĠMonteneg": 50008, + "ĠstrutConnector": 50009, + "Ġmassacres": 50010, + "Ġbriefs": 50011, + "fetched": 50012, + "uvian": 50013, + "olition": 50014, + "Failure": 50015, + "emonic": 50016, + "Ġflared": 50017, + "Ġclaimant": 50018, + "Ġcures": 50019, + "Ġgiveaways": 50020, + "ĠSubstance": 50021, + "alions": 50022, + 
"Ġcringe": 50023, + "ĠKul": 50024, + "Ġaristocracy": 50025, + "ĠUlster": 50026, + "olated": 50027, + "housing": 50028, + "ĠMIS": 50029, + "Ġglared": 50030, + "ĠWilhelm": 50031, + "needs": 50032, + "lambda": 50033, + "builders": 50034, + "ĠVIS": 50035, + "Ġradiator": 50036, + "ĠGhostbusters": 50037, + "Ġ436": 50038, + "actual": 50039, + "Ġherds": 50040, + "ça": 50041, + "watching": 50042, + "Ġcountering": 50043, + "Charge": 50044, + "Ġcharred": 50045, + "Ġwarheads": 50046, + "Ġiodine": 50047, + "ĠMacy": 50048, + "041": 50049, + "Ġdepartures": 50050, + "ĠSins": 50051, + "Ġdyed": 50052, + "ĠConcepts": 50053, + "gado": 50054, + "713": 50055, + "Ġquotations": 50056, + "Ġgist": 50057, + "ĠChristy": 50058, + "Ġantigen": 50059, + "ĠHemp": 50060, + "ĠDrawn": 50061, + "ĠBarg": 50062, + "ezvous": 50063, + "Ġpaternity": 50064, + "Ġardu": 50065, + "ĠAnchorage": 50066, + "ĠRik": 50067, + "Ġoverloaded": 50068, + "ĠUsername": 50069, + "ĠTammy": 50070, + "ĠNau": 50071, + "ĠCellular": 50072, + "Ġwaning": 50073, + "Ġrodent": 50074, + "ĠWorcester": 50075, + "ilts": 50076, + "ĠTad": 50077, + "Ġdwellings": 50078, + "Ġbullish": 50079, + "431": 50080, + "Ġretaliate": 50081, + "Ġmigraine": 50082, + "ĠChevron": 50083, + "CHECK": 50084, + "Ġdonkey": 50085, + "crim": 50086, + "SPA": 50087, + "ĠAnalog": 50088, + "Ġmarquee": 50089, + "ĠHaas": 50090, + "Bir": 50091, + "ĠGDDR": 50092, + "ĠDownloads": 50093, + "Ġwillpower": 50094, + "ĠForth": 50095, + "ĠRecorded": 50096, + "Ġimpossibility": 50097, + "ĠLogged": 50098, + "ĠFranks": 50099, + "ĠRatt": 50100, + "initions": 50101, + "Ġcleaners": 50102, + "Ġsorely": 50103, + "Ġflickering": 50104, + "ĠExamination": 50105, + "catching": 50106, + "alloween": 50107, + "Msg": 50108, + "Ġdunno": 50109, + "Fa": 50110, + "Ġdysph": 50111, + "crazy": 50112, + ".''.": 50113, + "Ġmainline": 50114, + "Ġcs": 50115, + "Ġptr": 50116, + "ĠWally": 50117, + "igun": 50118, + "951": 50119, + "ĠBigfoot": 50120, + "fights": 50121, + "Ġretrieving": 50122, + "Jr": 50123, + "Ġduplication": 50124, + "ĠExplan": 50125, + "Ġrelational": 50126, + "Ġquaint": 50127, + "Ġbiscuits": 50128, + "Ġado": 50129, + "Ġshudder": 50130, + "Ġantidote": 50131, + "blooded": 50132, + "ksh": 50133, + "Ġsauces": 50134, + "Ġreinvest": 50135, + "Ġdispensary": 50136, + "ĠDiver": 50137, + "Ġ9000": 50138, + "student": 50139, + "Ġinsepar": 50140, + "escap": 50141, + "Ġtoddlers": 50142, + "ĠGPIO": 50143, + "ĠAssignment": 50144, + "headers": 50145, + "Ġlackluster": 50146, + "Ġaback": 50147, + "956": 50148, + "Ġtoolbar": 50149, + "745": 50150, + "Ġoust": 50151, + "Ġcontemplation": 50152, + "ĠPRESIDENT": 50153, + "Ġ458": 50154, + "======": 50155, + "Ġguaranteeing": 50156, + "ĠHeist": 50157, + "ĠCannes": 50158, + "Ļ½": 50159, + "Ġcollaborator": 50160, + "ĠAmp": 50161, + "Ġgou": 50162, + "ĠSHALL": 50163, + "stories": 50164, + "783": 50165, + "Ġmobilized": 50166, + "Ġbrood": 50167, + "ĠLU": 50168, + "ĠðŁij": 50169, + "Ġrefin": 50170, + "ĠAnthropology": 50171, + "vind": 50172, + "illi": 50173, + "Ġwarranties": 50174, + "ĠBabel": 50175, + "Ġswath": 50176, + "Ġcaches": 50177, + "Ġantagonists": 50178, + "artifacts": 50179, + "Ġhotly": 50180, + "ĠStarts": 50181, + "ĠGö": 50182, + "zag": 50183, + "!!!!!": 50184, + "Ġscourge": 50185, + "Ġconspiring": 50186, + "ruits": 50187, + "reverse": 50188, + "ĠSheen": 50189, + "ĠJesuit": 50190, + "ĠGiovanni": 50191, + "adies": 50192, + "Ġbuttocks": 50193, + "earcher": 50194, + "acan": 50195, + "Ġvolleyball": 50196, + "Ġshrouded": 50197, + "Ġscoreboard": 50198, + "bats": 50199, + "ĠIPM": 50200, + "Ġasses": 
50201, + "Ġderegulation": 50202, + "ĠTelegram": 50203, + "ĠReboot": 50204, + "Ġ7000": 50205, + "ĠCanary": 50206, + "Ġkernels": 50207, + "ĠFrançois": 50208, + "ĠDuff": 50209, + "ĠPon": 50210, + "ĠLeica": 50211, + "ĠGarmin": 50212, + "Ġorphans": 50213, + "ĠClaudia": 50214, + "Ġcalendars": 50215, + "ĠLeilan": 50216, + "ento": 50217, + "Rocket": 50218, + "Ġbrunch": 50219, + "ĠHawking": 50220, + "ainers": 50221, + "Ġsensibilities": 50222, + "ĠkW": 50223, + "ĠKand": 50224, + "Ġreclaimed": 50225, + "Ġinterestingly": 50226, + "ש": 50227, + "romy": 50228, + "JM": 50229, + "ĠEnhancement": 50230, + "bush": 50231, + "Skip": 50232, + "Ġrappers": 50233, + "Ġgazing": 50234, + "pedia": 50235, + "athlon": 50236, + "Revolution": 50237, + "Ġsnipers": 50238, + "Ġreverted": 50239, + "Ġconglomerate": 50240, + "Terry": 50241, + "794": 50242, + "Ġharsher": 50243, + "Ġdesolate": 50244, + "ĠHitman": 50245, + "Commission": 50246, + "Ġ(/": 50247, + "âĢ¦.\"": 50248, + "Compar": 50249, + "Ġamplification": 50250, + "ominated": 50251, + "Ġregress": 50252, + "ĠCollider": 50253, + "Ġinformants": 50254, + "Ġgazed": 50255, + "<|endoftext|>": 50256 + }, + "merges": [ + "Ġ t", + "Ġ a", + "h e", + "i n", + "r e", + "o n", + "Ġt he", + "e r", + "Ġ s", + "a t", + "Ġ w", + "Ġ o", + "e n", + "Ġ c", + "i t", + "i s", + "a n", + "o r", + "e s", + "Ġ b", + "e d", + "Ġ f", + "in g", + "Ġ p", + "o u", + "Ġa n", + "a l", + "a r", + "Ġt o", + "Ġ m", + "Ġo f", + "Ġ in", + "Ġ d", + "Ġ h", + "Ġan d", + "i c", + "a s", + "l e", + "Ġt h", + "i on", + "o m", + "l l", + "en t", + "Ġ n", + "Ġ l", + "s t", + "Ġ re", + "v e", + "Ġ e", + "r o", + "l y", + "Ġb e", + "Ġ g", + "Ġ T", + "c t", + "Ġ S", + "i d", + "o t", + "Ġ I", + "u t", + "e t", + "Ġ A", + "Ġ is", + "Ġ on", + "i m", + "a m", + "o w", + "a y", + "a d", + "s e", + "Ġth at", + "Ġ C", + "i g", + "Ġf or", + "a c", + "Ġ y", + "v er", + "u r", + "Ġ u", + "l d", + "Ġs t", + "Ġ M", + "' s", + "Ġ he", + "Ġ it", + "at ion", + "it h", + "i r", + "c e", + "Ġy ou", + "i l", + "Ġ B", + "Ġw h", + "o l", + "Ġ P", + "Ġw ith", + "Ġ 1", + "t er", + "c h", + "Ġa s", + "Ġw e", + "Ġ (", + "n d", + "i ll", + "Ġ D", + "i f", + "Ġ 2", + "a g", + "er s", + "k e", + "Ġ \"", + "Ġ H", + "e m", + "Ġc on", + "Ġ W", + "Ġ R", + "he r", + "Ġw as", + "Ġ r", + "o d", + "Ġ F", + "u l", + "at e", + "Ġa t", + "r i", + "p p", + "o re", + "ĠT he", + "Ġs e", + "u s", + "Ġp ro", + "Ġh a", + "u m", + "Ġa re", + "Ġd e", + "a in", + "an d", + "Ġo r", + "ig h", + "es t", + "is t", + "a b", + "r om", + "Ġ N", + "t h", + "Ġc om", + "Ġ G", + "u n", + "o p", + "0 0", + "Ġ L", + "Ġn ot", + "es s", + "Ġe x", + "Ġ v", + "re s", + "Ġ E", + "e w", + "it y", + "an t", + "Ġb y", + "e l", + "o s", + "or t", + "o c", + "q u", + "Ġf rom", + "Ġha ve", + "Ġs u", + "i ve", + "ou ld", + "Ġs h", + "Ġth is", + "n t", + "r a", + "p e", + "igh t", + "ar t", + "m ent", + "Ġa l", + "u st", + "en d", + "- -", + "al l", + "Ġ O", + "ac k", + "Ġc h", + "Ġ le", + "i es", + "re d", + "ar d", + "â Ģ", + "ou t", + "Ġ J", + "Ġa b", + "e ar", + "i v", + "al ly", + "ou r", + "o st", + "g h", + "p t", + "Ġp l", + "as t", + "Ġc an", + "a k", + "om e", + "u d", + "T he", + "Ġh is", + "Ġd o", + "Ġg o", + "Ġh as", + "g e", + "' t", + "Ġ U", + "r ou", + "Ġs a", + "Ġ j", + "Ġb ut", + "Ġw or", + "Ġa ll", + "e ct", + "Ġ k", + "am e", + "Ġw ill", + "o k", + "Ġw he", + "Ġthe y", + "id e", + "0 1", + "f f", + "ic h", + "p l", + "t her", + "Ġt r", + ". 
.", + "Ġin t", + "i e", + "u re", + "ag e", + "Ġn e", + "i al", + "a p", + "in e", + "ic e", + "Ġm e", + "Ġo ut", + "an s", + "on e", + "on g", + "ion s", + "Ġwh o", + "Ġ K", + "Ġu p", + "Ġthe ir", + "Ġa d", + "Ġ 3", + "Ġu s", + "at ed", + "ou s", + "Ġm ore", + "u e", + "o g", + "ĠS t", + "in d", + "i ke", + "Ġs o", + "im e", + "p er", + ". \"", + "b er", + "i z", + "a ct", + "Ġon e", + "Ġsa id", + "Ġ -", + "a re", + "Ġyou r", + "c c", + "ĠT h", + "Ġc l", + "e p", + "a ke", + "ab le", + "i p", + "Ġcon t", + "Ġwh ich", + "i a", + "Ġ im", + "Ġab out", + "Ġwe re", + "ver y", + "u b", + "Ġh ad", + "Ġ en", + "Ġcom p", + ", \"", + "ĠI n", + "Ġu n", + "Ġa g", + "i re", + "ac e", + "a u", + "ar y", + "Ġw ould", + "as s", + "r y", + "Ġ âĢ", + "c l", + "o ok", + "e re", + "s o", + "Ġ V", + "ig n", + "i b", + "Ġof f", + "Ġt e", + "v en", + "Ġ Y", + "i le", + "o se", + "it e", + "or m", + "Ġ2 01", + "Ġre s", + "Ġm an", + "Ġp er", + "Ġo ther", + "or d", + "ul t", + "Ġbe en", + "Ġl ike", + "as e", + "an ce", + "k s", + "ay s", + "ow n", + "en ce", + "Ġd is", + "ct ion", + "Ġan y", + "Ġa pp", + "Ġs p", + "in t", + "res s", + "ation s", + "a il", + "Ġ 4", + "ic al", + "Ġthe m", + "Ġhe r", + "ou nt", + "ĠC h", + "Ġa r", + "Ġ if", + "Ġthe re", + "Ġp e", + "Ġy ear", + "a v", + "Ġm y", + "Ġs ome", + "Ġwhe n", + "ou gh", + "ac h", + "Ġth an", + "r u", + "on d", + "ic k", + "Ġo ver", + "ve l", + "Ġ qu", + "Ċ Ċ", + "Ġs c", + "re at", + "re e", + "ĠI t", + "ou nd", + "p ort", + "Ġal so", + "Ġp art", + "f ter", + "Ġk n", + "Ġbe c", + "Ġt ime", + "en s", + "Ġ 5", + "op le", + "Ġwh at", + "Ġn o", + "d u", + "m er", + "an g", + "Ġn ew", + "-- --", + "Ġg et", + "or y", + "it ion", + "ing s", + "Ġj ust", + "Ġint o", + "Ġ 0", + "ent s", + "o ve", + "t e", + "Ġpe ople", + "Ġp re", + "Ġit s", + "Ġre c", + "Ġt w", + "i an", + "ir st", + "ar k", + "or s", + "Ġwor k", + "ad e", + "o b", + "Ġs he", + "Ġo ur", + "w n", + "in k", + "l ic", + "Ġ1 9", + "ĠH e", + "is h", + "nd er", + "au se", + "Ġh im", + "on s", + "Ġ [", + "Ġ ro", + "f orm", + "i ld", + "at es", + "ver s", + "Ġon ly", + "o ll", + "Ġs pe", + "c k", + "e ll", + "am p", + "Ġa cc", + "Ġb l", + "i ous", + "ur n", + "f t", + "o od", + "Ġh ow", + "he d", + "Ġ '", + "Ġa fter", + "a w", + "Ġat t", + "o v", + "n e", + "Ġpl ay", + "er v", + "ic t", + "Ġc ould", + "it t", + "Ġa m", + "Ġf irst", + "Ġ 6", + "Ġa ct", + "Ġ $", + "e c", + "h ing", + "u al", + "u ll", + "Ġcom m", + "o y", + "o ld", + "c es", + "at er", + "Ġf e", + "Ġbe t", + "w e", + "if f", + "Ġtw o", + "oc k", + "Ġb ack", + ") .", + "id ent", + "Ġu nder", + "rou gh", + "se l", + "x t", + "Ġm ay", + "rou nd", + "Ġp o", + "p h", + "is s", + "Ġd es", + "Ġm ost", + "Ġd id", + "Ġad d", + "j ect", + "Ġin c", + "f ore", + "Ġp ol", + "on t", + "Ġag ain", + "cl ud", + "ter n", + "Ġkn ow", + "Ġne ed", + "Ġcon s", + "Ġc o", + "Ġ .", + "Ġw ant", + "Ġse e", + "Ġ 7", + "n ing", + "i ew", + "ĠTh is", + "c ed", + "Ġe ven", + "Ġin d", + "t y", + "ĠW e", + "at h", + "Ġthe se", + "Ġp r", + "Ġu se", + "Ġbec ause", + "Ġf l", + "n g", + "Ġn ow", + "ĠâĢ ĵ", + "c om", + "is e", + "Ġm ake", + "Ġthe n", + "ow er", + "Ġe very", + "ĠU n", + "Ġse c", + "os s", + "u ch", + "Ġe m", + "Ġ =", + "ĠR e", + "i ed", + "r it", + "Ġin v", + "le ct", + "Ġsu pp", + "at ing", + "Ġl ook", + "m an", + "pe ct", + "Ġ 8", + "ro w", + "Ġb u", + "Ġwhe re", + "if ic", + "Ġyear s", + "i ly", + "Ġd iff", + "Ġsh ould", + "Ġre m", + "T h", + "I n", + "Ġe v", + "d ay", + "' re", + "ri b", + "Ġre l", + "s s", + "Ġde f", + "Ġr ight", + "Ġs y", + ") ,", + "l es", + 
"00 0", + "he n", + "Ġth rough", + "ĠT r", + "_ _", + "Ġw ay", + "Ġd on", + "Ġ ,", + "Ġ1 0", + "as ed", + "Ġas s", + "ub lic", + "Ġre g", + "ĠA nd", + "i x", + "Ġ very", + "Ġin clud", + "ot her", + "Ġim p", + "ot h", + "Ġsu b", + "ĠâĢ Ķ", + "Ġbe ing", + "ar g", + "ĠW h", + "= =", + "ib le", + "Ġdo es", + "an ge", + "r am", + "Ġ 9", + "er t", + "p s", + "it ed", + "ation al", + "Ġb r", + "Ġd own", + "Ġman y", + "ak ing", + "Ġc all", + "ur ing", + "it ies", + "Ġp h", + "ic s", + "al s", + "Ġde c", + "at ive", + "en er", + "Ġbe fore", + "il ity", + "Ġwe ll", + "Ġm uch", + "ers on", + "Ġth ose", + "Ġsu ch", + "Ġ ke", + "Ġ end", + "ĠB ut", + "as on", + "t ing", + "Ġl ong", + "e f", + "Ġth ink", + "y s", + "Ġbe l", + "Ġs m", + "it s", + "a x", + "Ġo wn", + "Ġpro v", + "Ġs et", + "if e", + "ment s", + "b le", + "w ard", + "Ġsh ow", + "Ġp res", + "m s", + "om et", + "Ġo b", + "Ġs ay", + "ĠS h", + "t s", + "f ul", + "Ġe ff", + "Ġg u", + "Ġin st", + "u nd", + "re n", + "c ess", + "Ġ ent", + "ĠY ou", + "Ġgo od", + "Ġst art", + "in ce", + "Ġm ade", + "t t", + "st em", + "ol og", + "u p", + "Ġ |", + "um p", + "Ġhe l", + "ver n", + "ul ar", + "u ally", + "Ġa c", + "Ġm on", + "Ġl ast", + "Ġ2 00", + "1 0", + "Ġst ud", + "u res", + "ĠA r", + "sel f", + "ar s", + "mer ic", + "u es", + "c y", + "Ġm in", + "oll ow", + "Ġc ol", + "i o", + "Ġm od", + "Ġc ount", + "ĠC om", + "he s", + "Ġf in", + "a ir", + "i er", + "âĢ Ķ", + "re ad", + "an k", + "at ch", + "e ver", + "Ġst r", + "Ġpo int", + "or k", + "ĠN ew", + "Ġs ur", + "o ol", + "al k", + "em ent", + "Ġus ed", + "ra ct", + "we en", + "Ġs ame", + "ou n", + "ĠA l", + "c i", + "Ġdiff ere", + "Ġwh ile", + "---- ----", + "Ġg ame", + "ce pt", + "Ġs im", + ".. .", + "Ġin ter", + "e k", + "Ġre port", + "Ġpro du", + "Ġst ill", + "l ed", + "a h", + "Ġhe re", + "Ġwor ld", + "Ġth ough", + "Ġn um", + "ar ch", + "im es", + "al e", + "ĠS e", + "ĠI f", + "/ /", + "ĠL e", + "Ġre t", + "Ġre f", + "Ġtr ans", + "n er", + "ut ion", + "ter s", + "Ġt ake", + "ĠC l", + "Ġcon f", + "w ay", + "a ve", + "Ġgo ing", + "Ġs l", + "u g", + "ĠA meric", + "Ġspe c", + "Ġh and", + "Ġbet ween", + "ist s", + "ĠD e", + "o ot", + "I t", + "Ġe ar", + "Ġagain st", + "Ġh igh", + "g an", + "a z", + "at her", + "Ġex p", + "Ġo p", + "Ġin s", + "Ġg r", + "Ġhel p", + "Ġre qu", + "et s", + "in s", + "ĠP ro", + "is m", + "Ġf ound", + "l and", + "at a", + "us s", + "am es", + "Ġp erson", + "Ġg reat", + "p r", + "Ġs ign", + "ĠA n", + "' ve", + "Ġs omet", + "Ġs er", + "h ip", + "Ġr un", + "Ġ :", + "Ġt er", + "ire ct", + "Ġf ollow", + "Ġd et", + "ic es", + "Ġf ind", + "1 2", + "Ġm em", + "Ġc r", + "e red", + "e x", + "Ġex t", + "ut h", + "en se", + "c o", + "Ġte am", + "v ing", + "ou se", + "as h", + "at t", + "v ed", + "Ġsy stem", + "ĠA s", + "d er", + "iv es", + "m in", + "Ġle ad", + "ĠB l", + "c ent", + "Ġa round", + "Ġgo vern", + "Ġc ur", + "vel op", + "an y", + "Ġc our", + "al th", + "ag es", + "iz e", + "Ġc ar", + "od e", + "Ġl aw", + "Ġre ad", + "' m", + "c on", + "Ġre al", + "Ġsupp ort", + "Ġ1 2", + ".. 
..", + "Ġre ally", + "n ess", + "Ġf act", + "Ġd ay", + "Ġb oth", + "y ing", + "Ġs erv", + "ĠF or", + "Ġth ree", + "Ġw om", + "Ġm ed", + "od y", + "ĠThe y", + "5 0", + "Ġex per", + "t on", + "Ġe ach", + "ak es", + "Ġc he", + "Ġc re", + "in es", + "Ġre p", + "1 9", + "g g", + "ill ion", + "Ġg rou", + "ut e", + "i k", + "W e", + "g et", + "E R", + "Ġm et", + "Ġs ays", + "o x", + "Ġd uring", + "er n", + "iz ed", + "a red", + "Ġf am", + "ic ally", + "Ġha pp", + "ĠI s", + "Ġch ar", + "m ed", + "v ent", + "Ġg ener", + "i ent", + "p le", + "i et", + "re nt", + "1 1", + "v es", + "pt ion", + "Ġ2 0", + "form ation", + "Ġc or", + "Ġoff ic", + "ie ld", + "Ġto o", + "is ion", + "Ġin f", + "Ġ Z", + "t he", + "o ad", + "Ġp ublic", + "Ġpro g", + "r ic", + "* *", + "Ġw ar", + "Ġp ower", + "v iew", + "Ġf ew", + "Ġl oc", + "Ġdiffere nt", + "Ġst ate", + "Ġhe ad", + "' ll", + "Ġp oss", + "Ġst at", + "re t", + "ant s", + "Ġv al", + "Ġis s", + "Ġc le", + "i vers", + "an c", + "Ġex pl", + "Ġan other", + "Ġ Q", + "Ġa v", + "th ing", + "n ce", + "W h", + "Ġch ild", + "Ġs ince", + "i red", + "l ess", + "Ġl ife", + "Ġde velop", + "itt le", + "Ġde p", + "Ġp ass", + "ã ĥ", + "Ġt urn", + "or n", + "Th is", + "b ers", + "ro ss", + "ĠA d", + "Ġf r", + "Ġres p", + "Ġsec ond", + "o h", + "Ġ /", + "Ġdis c", + "Ġ &", + "Ġsomet hing", + "Ġcomp le", + "Ġ ed", + "Ġf il", + "Ġmon th", + "a j", + "u c", + "Ġgovern ment", + "Ġwith out", + "Ġle g", + "Ġd ist", + "Ġp ut", + "Ġqu est", + "an n", + "Ġpro t", + "2 0", + "Ġne ver", + "i ence", + "Ġle vel", + "Ġar t", + "Ġth ings", + "Ġm ight", + "Ġeff ect", + "Ġcont ro", + "Ġc ent", + "Ġ1 8", + "Ġall ow", + "Ġbel ie", + "ch ool", + "ot t", + "Ġinc re", + "Ġfe el", + "Ġres ult", + "Ġl ot", + "Ġf un", + "ot e", + "Ġt y", + "ere st", + "Ġcont in", + "Ġus ing", + "Ġb ig", + "2 01", + "Ġas k", + "Ġb est", + "Ġ )", + "I N", + "Ġo pp", + "3 0", + "Ġnum ber", + "in ess", + "S t", + "le ase", + "Ġc a", + "Ġm ust", + "Ġd irect", + "Ġg l", + "Ġ <", + "Ġop en", + "Ġp ost", + "Ġcom e", + "Ġse em", + "ord ing", + "Ġwe ek", + "ate ly", + "it al", + "Ġe l", + "ri end", + "Ġf ar", + "Ġt ra", + "in al", + "Ġp ri", + "ĠU S", + "Ġpl ace", + "Ġfor m", + "Ġto ld", + "\" :", + "ain s", + "at ure", + "ĠTr ump", + "Ġst and", + "Ġ #", + "id er", + "ĠF r", + "Ġne xt", + "Ġs oc", + "Ġp ur", + "Ġle t", + "Ġl ittle", + "Ġh um", + "Ġ i", + "r on", + "1 5", + "Ġ1 5", + "Ġcomm un", + "Ġm ark", + "ĠThe re", + "Ġw r", + "ĠTh at", + "Ġin formation", + "w ays", + "Ġb us", + "a pp", + "Ġinv est", + "m e", + "Ġh ard", + "ain ed", + "e ad", + "Ġim port", + "Ġapp ro", + "Ġt est", + "Ġt ri", + "Ġre st", + "os ed", + "Ġf ull", + "Ġc are", + "ĠS p", + "Ġc ase", + "O N", + "Ġs k", + "Ġl ess", + "Ġ +", + "Ġpart ic", + "ĠP l", + "ab ly", + "u ck", + "is hed", + "ch n", + "b e", + "Ġl ist", + "at or", + "Ġto p", + "Ġad v", + "ĠB e", + "ru ct", + "Ġd em", + "r ation", + "l ing", + "g y", + "re en", + "g er", + "Ġh ome", + "Ġle ft", + "Ġbet ter", + "Ġd ata", + "Ġ1 1", + "Ġatt ack", + "Ġpro ble", + "l ine", + "ard s", + "Ġbe h", + "r al", + "ĠH ow", + "ĠS he", + "ar ge", + "Ġ --", + ": //", + "Ġb ro", + "ĠP h", + "at s", + "Ġbu ild", + "w w", + "id ed", + "a im", + "as es", + "en cy", + "Ġm ain", + "in ed", + "Ġinclud ing", + "Ġ {", + "Ġg ot", + "Ġint erest", + "Ġke ep", + "Ġ X", + "Ġe as", + "ain ing", + "Ġcl ass", + "âĢ ¦", + "ĠN o", + "Ġv ar", + "Ġsm all", + "amp le", + "A T", + "Ġ ide", + "ĠS o", + "Ġre ce", + "Ġpol it", + "Ġm ov", + "Ġpl an", + "Ġper cent", + "iv ing", + "Ġc amp", + "Ġp ay", + "1 4", + "s c", + "is ed", + "Ġu nt", + 
"one y", + "pl oy", + "== ==", + "Ġdid n", + "ĠI nd", + "el s", + "ert ain", + "Ġp os", + "__ __", + "i ver", + "Ġpro cess", + "Ġprog ram", + "if ied", + "ĠR ep", + "1 6", + "u ro", + "olog y", + "at ter", + "in a", + "Ġn ame", + "ĠA ll", + "Ġf our", + "Ġret urn", + "v ious", + "b s", + "Ġcall ed", + "Ġm ove", + "ĠS c", + "ir d", + "Ġgrou p", + "Ġb re", + "Ġm en", + "Ġc ap", + "t en", + "e e", + "Ġd ri", + "le g", + "he re", + "uth or", + "Ġp at", + "Ġcur rent", + "id es", + "Ġp op", + "t o", + "ent ion", + "Ġal ways", + "Ġm il", + "Ġwom en", + "Ġ1 6", + "Ġo ld", + "iv en", + "ra ph", + "ĠO r", + "r or", + "ent ly", + "Ġn ear", + "ĠE x", + "re am", + "s h", + "Ġ1 4", + "Ġf ree", + "iss ion", + "st and", + "ĠC on", + "al ity", + "us ed", + "1 3", + "Ġdes ign", + "Ġch ange", + "Ġch ang", + "Ġb o", + "Ġv is", + "em ber", + "Ġb ook", + "read y", + "Ġk ill", + "2 5", + "pp ed", + "Ġa way", + "Ġab le", + "Ġcount ry", + "Ġcon st", + "ar n", + "Ġor der", + "A R", + "i or", + "i um", + "or th", + "1 8", + "ail able", + "Ġs w", + "Ġm illion", + "Ġ1 3", + "at ic", + "t ed", + "ĠG o", + "Ġo per", + "en g", + "Ġth ing", + "aj or", + "con om", + "ĠCom m", + "Ġwh y", + "u red", + "ur al", + "Ġs chool", + "b y", + "ĠM ar", + "Ġa ff", + "Ġd ays", + "Ġan n", + "us h", + "an e", + "I f", + "e g", + "Ġpro f", + "Ġhe alth", + "ou th", + "B ut", + "ion al", + ". ,", + "Ġs ol", + "Ġal ready", + "Ġ3 0", + "Ġchar act", + "H e", + "Ġf riend", + "E S", + "i ans", + "ic le", + "' d", + "ĠO n", + "Ġle ast", + "Ġp rom", + "Ġd r", + "Ġh ist", + "it her", + "Ġ est", + "i qu", + "1 7", + "s on", + "Ġte ll", + "Ġt alk", + "oh n", + "o int", + "le ction", + "A N", + "Ġunt il", + "au gh", + "Ġl ater", + "Ġ ve", + "Ġv iew", + "end ing", + "iv ed", + "Ġwor d", + "w are", + "Ġc ost", + "Ġen ough", + "Ġg ive", + "ĠUn ited", + "Ġte chn", + "are nt", + "O R", + "Ġp ar", + "ĠD r", + "Ġ201 6", + "r ist", + "er ing", + "Ġ Â", + "Ġl arge", + "s ide", + "ac y", + "cc ess", + "Ġw in", + "Ġimport ant", + "Ġ19 9", + "Ġdoes n", + "Ġ1 7", + "Ġbus iness", + "Ġcle ar", + "Ġre se", + "\" ,", + "ur y", + "Ġe qu", + "as ter", + "al f", + "ĠAmeric an", + "n ect", + "Ġex pect", + "ivers ity", + "Ġo cc", + "ĠF l", + "Ġk ind", + "Ġme an", + "Ġp ast", + "Ġde v", + "Ġb as", + "le t", + "ra ft", + "Ġor gan", + "Ġde l", + "Ġper form", + "Ġst ory", + "Ġse ason", + "ĠC ol", + "Ġcl aim", + "Ġc ame", + "Ġwith in", + "Ġl ine", + "Ġpro ject", + "ĠA t", + "Ġcontro l", + "end ed", + "ĠS y", + "Ġa ir", + "iz ation", + "Ġ *", + "le y", + "Ġm oney", + "id d", + "Y ou", + "f or", + "Ġfam ily", + "Ġm aking", + "Ġb it", + "Ġpol ice", + "Ġhapp en", + "Ġ vers", + "on y", + "u ff", + "ĠW hen", + "Ġs it", + "ide o", + "l f", + "is on", + "Ġsu re", + "g in", + "Ġapp ear", + "Ġl ight", + "Ġ es", + "o f", + "Ġw ater", + "Ġt imes", + "n ot", + "Ġg row", + "Ġcomp any", + "ĠT e", + "ow s", + "Ġm ar", + "our ce", + "i ol", + "ar m", + "b r", + "Ġex ample", + "Ġcon c", + "Ġf ore", + "ĠT o", + "p ro", + "E N", + "ri es", + "Ġ2 5", + "ĠC an", + "ne y", + "Ġact ually", + "Ġe ver", + "ur ity", + "ak en", + "ap s", + "Ġt ax", + "Ġm ajor", + "am a", + "Ġof ten", + "er al", + "Ġhum an", + "Ġj ob", + "is ter", + "Ġav ailable", + "oc r", + "en n", + "a id", + "iv id", + "Ġrec ord", + "? 
\"", + "Ġs ing", + "ĠA m", + "id ence", + "Ġnew s", + "st er", + "Ġe conom", + "Ġfollow ing", + "ĠB r", + "is ing", + "Ġh our", + "m ost", + "um ent", + "Ġse x", + "Ġdes c", + "Ġbec ome", + "ĠE d", + "Ġto ok", + "Ġha ving", + "Ġprodu ct", + "a ult", + "A s", + "ar ing", + "Ġme ans", + "Ġh op", + "un e", + "Ġch o", + "Ġc ertain", + "Ġn on", + "Ġde al", + "2 4", + "le ment", + "oc i", + "en e", + "Ġs ide", + "ĠP r", + "ĠM ay", + "Ġre ason", + "u ed", + "c hed", + "ul ation", + "Ġe lect", + "Ġoffic ial", + "Ġposs ible", + "Ġh old", + "and s", + "ot s", + "Ġc ity", + "or ies", + "Ġse ver", + "Ġchild ren", + "Ġon ce", + "Ġact iv", + "l er", + "Ġn ight", + "it ions", + "ĠJ ohn", + "a pe", + "pl ay", + "Ġd one", + "Ġl im", + "Ġwork ing", + "ĠP res", + "or ld", + "e b", + "ĠC o", + "Ġb ody", + "ail s", + "ut es", + "ĠM r", + "Ġwhe ther", + "Ġa uthor", + "ro p", + "Ġpro per", + "Ġse en", + ") ;", + "Ġf ac", + "ĠS u", + "Ġcon d", + "it ing", + "Ġcour se", + "Ġ }", + "-------- --------", + "a ign", + "Ġev ent", + "Ġen g", + "Ġp ot", + "Ġin tern", + "i am", + "Ġsh ort", + "em pt", + "ã Ĥ", + "ĠG od", + "il ar", + "8 0", + "Ġor ig", + "I S", + "our n", + "ab ility", + "it ive", + "Ġd am", + "Ġ1 00", + "Ġp ress", + "Ġdo ing", + "Ġprot ect", + "r ing", + "Ġthough t", + "Ġquest ion", + "re w", + "ĠW ar", + "Ġsever al", + "ĠSt ate", + "Ġg iven", + "Ġf und", + "ĠT w", + "Ġw ent", + "an ces", + "w ork", + "p or", + "m y", + "4 0", + "Ġar g", + "art ment", + "ust om", + "Ġpol ic", + "Ġme et", + "Ġc reat", + "2 2", + "ĠSt ates", + "Ġg ames", + "ra w", + "ut ure", + "Ġunder stand", + "ur s", + "ĠO b", + "l ish", + "s y", + "Ġm akes", + "Ġw on", + "ag on", + "Ġh tt", + "Ġl ove", + "ent ial", + "Ġcomple te", + "p ar", + "ĠI m", + "A L", + "Ġacc ount", + " ł", + "ore d", + "ver t", + "Ġ ident", + "Ġ201 5", + "Ġother s", + "ĠM in", + "i ber", + "ver age", + "The re", + "ition al", + "d d", + "Ġpro b", + "Ġyou ng", + "Ġal ong", + "Ġacc ording", + "Ġy et", + "Ġmem bers", + "ĠWh at", + "o id", + "ĠM an", + "A nd", + "Ġam ong", + "a i", + "Ġem ploy", + "ĠR es", + "Ġ >", + "Ġinv ol", + "Ġl ow", + "a f", + "ĠC ar", + "Ġh ig", + "ĠO ne", + "ĠS ec", + "in ation", + "Ġlike ly", + "Ġan t", + "ag ed", + "ĠR uss", + "Ġb en", + "Ġre le", + "F or", + "b ack", + "ĠN ot", + "Ġpres ident", + "b all", + "Ġacc ess", + "ivid ual", + "ĠD em", + "ĠE uro", + "6 0", + "Ġkn own", + "ir l", + "ĠG r", + "Ġear ly", + "u se", + "iet y", + "âĢ ĵ", + "Ġf ight", + "Ġs ent", + "Ġto day", + "Ġmark et", + "\" .", + "Ġb ased", + "Ġstr ong", + "ur ther", + "Ġde b", + "m ber", + "Ġproble m", + "Ġde ath", + "Ġsoc ial", + "im ate", + "A S", + "ort un", + "Ġcamp aign", + "er y", + "C h", + "Ġe y", + "i ally", + "Ġm us", + "w h", + "p os", + "Ġ er", + "Ġsa f", + "Ġmonth s", + "ir on", + "Ġv iol", + "Ġf ive", + "Ġst re", + "Ġplay ers", + "in c", + "al d", + "y ear", + "a un", + "Ġsu ccess", + "Ġpres ent", + "ere nce", + "Ġ201 4", + "Ġsu gg", + "Ġpartic ular", + "Ġtr y", + "Ġsugg est", + "ĠCh rist", + "on es", + "Ġpri v", + "2 3", + "Ġc rit", + "Ġl and", + "Ġloc al", + "if y", + "2 9", + "Ġa ut", + "E D", + "ĠG u", + "Ġm ult", + "Ġpolit ical", + "Ġask ed", + "Ġfor mer", + "it ter", + "ri pt", + "Ġcl ose", + "Ġp ract", + "ĠY ork", + "Ġget ting", + "Ġac ross", + "Ġcom b", + "Ġbelie ve", + "Ġ z", + "Ġto get", + "Ġtoget her", + "ĠC ent", + "ir c", + "Ġind ividual", + "ĠM c", + "2 7", + "is k", + "ĠE ng", + "Ġf ace", + "Ġ2 4", + "Ġval ue", + "Ġare a", + "e v", + "Ġw rit", + "ĠPres ident", + "Ġv ot", + "Ġke y", + "Ġm om", + "p ut", + "Ġany thing", + "Ġexper 
ience", + "att le", + "Ġm ind", + "a ff", + "om m", + "Ġf uture", + "g ed", + "Ġc ut", + "Ġto t", + "it ch", + "Ġv ideo", + "Ġinvest ig", + "Ġn et", + "ĠM y", + "r ict", + "i en", + ". )", + "Ġimp ro", + "th ough", + "ward s", + "Ġcon nect", + "ĠM ed", + "sel ves", + "ens ive", + "m b", + "o ber", + "at ors", + "A n", + "Ġ5 0", + "Ġre du", + "res ent", + "Ġab ove", + "Ġf re", + "ĠEuro pe", + "s w", + "Ġam ount", + "ĠA pp", + "Ġe ither", + "Ġmil it", + "Ġan al", + "Ġf ail", + "ĠE n", + "al es", + "Ġspec ial", + "Ġbl ack", + "I T", + "c her", + "Ġlook ing", + "Ġf ire", + "y n", + "Ġal most", + "o on", + "Ġstud y", + "Ġm iss", + "c hes", + "ro wn", + "Ġt re", + "Ġcommun ity", + "Ġmed ia", + "Ġf ood", + "Ġcom es", + "ĠUn iversity", + "Ġsing le", + "Wh at", + "u ly", + "Ġh alf", + "ag ue", + "h od", + "ĠRep ublic", + "Ġstart ed", + "Ġqu ick", + "ot o", + "b ook", + "Ġiss ue", + "it or", + "Ġel se", + "Ġcons ider", + "2 6", + "ro du", + "Ġt aken", + "2 8", + "9 9", + "ĠW ith", + "Ġtr ue", + "Ġw a", + "Ġtr ad", + "Ġag o", + "Ġm ess", + "ie f", + "Ġadd ed", + "o ke", + "Ġb ad", + "Ġf av", + "3 3", + "Ġsim ilar", + "as k", + "ĠD on", + "Ġcharact er", + "ort s", + "ĠH ouse", + "Ġreport ed", + "Ġty pe", + "v al", + "i od", + "ĠHow ever", + "Ġt arg", + "Ġent ire", + "pp ing", + "Ġhist ory", + "Ġl ive", + "ff ic", + ".... ....", + "ed eral", + "Ġtr ying", + "Ġdisc uss", + "ĠH ar", + "ac es", + "l ished", + "Ġse lf", + "os p", + "re st", + "Ġro om", + "el t", + "Ġf all", + "ol ution", + "Ġe t", + "Ġ x", + "Ġis n", + "Ġide a", + "b o", + "Ġs ound", + "ĠD ep", + "Ġsome one", + "ci ally", + "ull y", + "Ġf oc", + "Ġob ject", + "if t", + "ap er", + "Ġplay er", + "Ġr ather", + "Ġserv ice", + "as hing", + "ĠD o", + "ĠP art", + "ru g", + "m on", + "p ly", + "Ġm or", + "Ġnot hing", + "Ġprov ide", + "I C", + "un g", + "Ġpart y", + "Ġex ist", + "Ġm ag", + "7 0", + "Ġr ul", + "Ġh ouse", + "Ġbeh ind", + "Ġhow ever", + "ĠW orld", + "Ġs um", + "Ġapp lic", + "Ġ ;", + "Ġfun ction", + "g r", + "ĠP ol", + "Ġfr ont", + "2 00", + "Ġser ies", + "Ġt em", + "Ġty p", + "ill s", + "Ġo pt", + "Ġpoint s", + "Ġbel ow", + "itt ed", + "Ġspec ific", + "Ġ201 7", + "um b", + "Ġr a", + "Ġpre vious", + "Ġpre t", + "re me", + "Ġc ustom", + "Ġcour t", + "ĠM e", + "Ġre pl", + "Ġwho le", + "g o", + "c er", + "Ġt reat", + "ĠA ct", + "Ġprob ably", + "Ġle arn", + "end er", + "ĠA ss", + "Ġvers ion", + "n ow", + "Ġche ck", + "ĠC al", + "R E", + "min ist", + "O n", + "our ces", + "Ġben ef", + "Ġd oc", + "Ġdet er", + "Ġen c", + "Ġsu per", + "Ġadd ress", + "Ġv ict", + "Ġ201 3", + "Ġme as", + "t r", + "Ġf ield", + "W hen", + "Ġsign ific", + "u ge", + "Ġfe at", + "Ġcomm on", + "l oad", + "Ġbe gin", + "Ġbr ing", + "Ġa ction", + "er man", + "Ġdesc rib", + "Ġind ust", + "Ġwant ed", + "ri ed", + "m ing", + "Ġatt empt", + "4 5", + "f er", + "Ġd ue", + "ress ion", + "# #", + "Ġsh all", + "Ġs ix", + "o o", + "Ġst ep", + "Ġp ub", + "Ġhim self", + "Ġ2 3", + "Ġc op", + "Ġd est", + "Ġst op", + "A C", + "ib ility", + "Ġl ab", + "ic ult", + "Ġhour s", + "Ġcre ate", + "Ġf urther", + "ĠAmeric a", + "ĠC ity", + "Ġd ou", + "he ad", + "S T", + "ĠN orth", + "c ing", + "Ġn ational", + "u le", + "ĠIn st", + "Ġt aking", + "ĠQ u", + "ir t", + "Ġre d", + "Ġrese arch", + "v iron", + "ĠG e", + "Ġbre ak", + "an a", + "Ġsp ace", + "ater ial", + "Ġrec ent", + "ĠA b", + "Ġgener al", + "Ġh it", + "Ġper iod", + "Ġevery thing", + "ive ly", + "Ġph ys", + "Ġsay ing", + "an ks", + "Ġc ou", + "Ġc ult", + "ac ed", + "e al", + "u ation", + "Ġc oun", + "l u", + "Ġinclud e", + "Ġpos ition", + 
"ĠA fter", + "ĠCan ad", + "ĠE m", + "Ġim m", + "ĠR ed", + "Ġp ick", + "Ġcom pl", + "Ġm atter", + "re g", + "e xt", + "ang u", + "is c", + "o le", + "a ut", + "Ġcomp et", + "e ed", + "f ect", + "Ġ2 1", + "ĠS en", + "ĠThe se", + "as ing", + "Ġcan not", + "Ġin it", + "Ġrel ations", + "ac hed", + "Ġb ar", + "Ġ4 0", + "ĠT H", + "Ġ201 2", + "Ġv ol", + "Ġg round", + "Ġsec urity", + "Ġup d", + "il t", + "3 5", + "Ġconc ern", + "ĠJ ust", + "Ġwh ite", + "Ġseem s", + "ĠH er", + "pe cially", + "i ents", + "Ġann oun", + "Ġf ig", + "ight s", + "Ġst ri", + "l ike", + "id s", + "Ġs us", + "Ġw atch", + "Ġ â", + "Ġw ind", + "ĠC ont", + "Ġit self", + "Ġm ass", + "A l", + "y le", + "iqu e", + "ĠN ational", + "Ġab s", + "Ġp ack", + "Ġout side", + "Ġan im", + "Ġp ain", + "et er", + "Ġman ag", + "du ct", + "og n", + "Ġ ]", + "ĠSe pt", + "se c", + "o ff", + "ĠJ an", + "Ġf oot", + "ad es", + "Ġth ird", + "Ġm ot", + "Ġev idence", + "int on", + "Ġth reat", + "a pt", + "pl es", + "c le", + "Ġl o", + "Ġde cl", + "Ġit em", + "med i", + "Ġrep resent", + "om b", + "am er", + "Ġsignific ant", + "og raph", + "s u", + "Ġc al", + "i res", + "00 00", + "I D", + "A M", + "Ġsim ply", + "Ġlong er", + "Ġf ile", + "O T", + "c he", + "S o", + "ate g", + "or g", + "ĠH is", + "Ġen er", + "Ġd om", + "Ġup on", + "il i", + "\": \"", + "Ġthem selves", + "Ġcom ing", + "Ġqu ite", + "Ġdiff icult", + "ĠB ar", + "il ities", + "re l", + "end s", + "c ial", + "6 4", + "Ġwom an", + "ra p", + "y r", + "Ġne cess", + "ip s", + "Ġte xt", + "Ġrequ ire", + "Ġmilit ary", + "Ġre view", + "Ġresp ons", + "7 5", + "Ġsub ject", + "Ġinst ead", + "Ġiss ues", + "Ġg en", + "\" ,\"", + "Ġmin utes", + "Ġwe ap", + "r ay", + "am ed", + "t ime", + "b l", + "H ow", + "Ġc ode", + "ĠS m", + "Ġhig her", + "ĠSt e", + "r is", + "Ġp age", + "Ġstud ents", + "ĠIn tern", + "Ġmet hod", + "ĠA ug", + "ĠP er", + "ĠA g", + "Ġpolic y", + "ĠS w", + "Ġex ec", + "Ġac cept", + "um e", + "rib ut", + "Ġword s", + "Ġfin al", + "Ġchang es", + "ĠDem ocr", + "Ġfriend s", + "Ġres pect", + "Ġe p", + "Ġcomp an", + "iv il", + "Ġdam age", + "** **", + "og le", + "viron ment", + "Ġne g", + "ent al", + "Ġa p", + "Ġtot al", + "iv al", + "! 
\"", + "l im", + "Ġneed s", + "Ġag re", + "Ġdevelop ment", + "Ġa ge", + "ip le", + "2 1", + "Ġresult s", + "ĠA f", + "S h", + "Ġg un", + "ĠOb ama", + "ro ll", + "Ġ @", + "Ġright s", + "ĠB rit", + "Ġrun ning", + "Ġwas n", + "Ġp ort", + "Ġr ate", + "Ġpret ty", + "Ġtarg et", + "Ġsa w", + "Ġc irc", + "Ġwor ks", + "ic ro", + "al t", + "o ver", + "ww w", + "Th at", + "l ier", + "Ġevery one", + "ud e", + "Ġp ie", + "idd le", + "ra el", + "Ġr ad", + "Ġbl ock", + "Ġw alk", + "T o", + "ã ģ", + "n es", + "ĠA ust", + "a ul", + "ro te", + "ĠS outh", + "ess ion", + "op h", + "Ġshow s", + "Ġs ite", + "Ġj o", + "Ġr isk", + "cl us", + "l t", + "Ġin j", + "id ing", + "ĠS pe", + "Ġch all", + "ir m", + "Ġ2 2", + "itt ing", + "st r", + "Ġh y", + "L E", + "ke y", + "Ġbe gan", + "at ur", + "ashing ton", + "l am", + "ĠD av", + "b it", + "Ġs ize", + "ĠP ar", + "3 8", + "ourn al", + "f ace", + "Ġdec ision", + "Ġl arg", + "Ġj ud", + "re ct", + "Ġcontin ue", + "ĠO ct", + "ove red", + "ĠI nt", + "==== ====", + "Ġp arent", + "ĠW ill", + "Ġeas y", + "Ġd rug", + "ang er", + "Ġs ense", + "Ġd i", + "id ay", + "Ġener gy", + "ist ic", + "Ġass oci", + "ar ter", + "ob al", + "e ks", + "ĠE l", + "ur ch", + "Ġg irl", + "o e", + "it le", + "Ġ2 8", + "ĠC he", + "Ġrequ est", + "Ġso on", + "Ġh ost", + "k y", + "Ġst ates", + "om es", + "Ġm aterial", + "le x", + "Ġmom ent", + "Ġan sw", + "on se", + "Ġes pecially", + "Ġn orm", + "Ġserv ices", + "p ite", + "r an", + "Ġro le", + "4 4", + ") :", + "Ġc red", + "C l", + "____ ____", + "Ġm at", + "Ġl og", + "ĠCl inton", + "O U", + "Ġoff ice", + "Ġ2 6", + "Ġch arg", + "Ġtr ack", + "m a", + "Ġhe art", + "Ġb all", + "Ġperson al", + "Ġbuild ing", + "n a", + "s et", + "b ody", + "ĠBl ack", + "Ġincre ase", + "itt en", + "Ġneed ed", + "3 6", + "3 2", + "= \"", + "Ġl ost", + "Ġbec ame", + "Ġgrou ps", + "ĠM us", + "Ġw rote", + "ĠP e", + "Ġpro p", + "j oy", + "à ©", + "ĠWh ite", + "Ġde ad", + ". 
'", + "Ġhtt p", + "Ġwe bs", + "O S", + "Ġins ide", + "Ġwr ong", + "Ġstat ement", + "Ġ ...", + "y l", + "Ġfil m", + "Ġmus ic", + "Ġsh are", + "ific ation", + "Ġre lease", + "Ġfor ward", + "Ġst ay", + "Ġcomp ut", + "it te", + "s er", + "Ġorig inal", + "Ġc ard", + "Ġc and", + "Ġd iv", + "at ural", + "Ġfav or", + "O M", + "Ġc ases", + "us es", + "Ġse ction", + "Ġle ave", + "g ing", + "ov ed", + "ĠW ashington", + "3 9", + "ĠG l", + "Ġrequ ired", + "act ion", + "ap an", + "o or", + "it er", + "ĠK ing", + "Ġcount ries", + "ĠG erman", + "ll ing", + "Ġ2 7", + "3 4", + "Ġquest ions", + "Ġpr im", + "Ġc ell", + "Ġsh oot", + "Ġany one", + "ĠW est", + "Ġaff ect", + "ep end", + "Ġon line", + "ĠIs rael", + "ĠSept ember", + "Ġab ility", + "Ġcont ent", + "is es", + "Ġre ve", + "Ġl aun", + "Ġind ic", + "Ġfor ce", + "c ast", + "Ġso ld", + "av ing", + "f l", + "Ġso ft", + "Ġcompan ies", + "ce ed", + "Ġart icle", + "Ġa ud", + "Ġre v", + "Ġed uc", + "Ġplay ing", + "0 5", + "Ġhe ld", + "ct or", + "Ġrele ased", + "Ġf ederal", + "3 7", + "Ġad minist", + "Ġinter view", + "Ġinst all", + "Ġrece ived", + "Ġs ource", + "u k", + "P h", + "Ġser ious", + "Ġcre ated", + "Ġc ause", + "Ġim medi", + "Ġdef in", + "u el", + "ĠDep artment", + "ct ions", + "ĠC our", + "ĠN ow", + "z e", + "it es", + "it ution", + "Ġl ate", + "Ġspe ak", + "n ers", + "Ġleg al", + "ar i", + "ĠC or", + "Ġwe eks", + "Ġmod el", + "Ġp red", + "Ġex act", + "B C", + "ĠB y", + "IN G", + "os ing", + "Ġt akes", + "Ġreg ard", + "Ġopp ortun", + "Ġpr ice", + "Ġ19 8", + "ĠA pr", + "f ully", + "Ġor d", + "Ġproble ms", + "ru ction", + "h am", + "ĠC ount", + "le ge", + "Ġlead ers", + "E T", + "le v", + "Ġde ep", + "olog ical", + "es e", + "h aps", + "ĠS ome", + "Ġp ers", + "Ġcont ract", + "Ġrelations hip", + "s p", + "ou d", + "Ġb ase", + "4 8", + "m it", + "A d", + "anc ial", + "Ġcons um", + "Ġpot ential", + "Ġl angu", + "re m", + "et h", + "Ġrel ig", + "ress ed", + "6 6", + "Ġl ink", + "Ġl ower", + "ay er", + "ĠJ une", + "Ġf em", + "un t", + "er c", + "ur d", + "Ġcont act", + "Ġ ill", + "Ġm other", + "Ġest ab", + "h tt", + "ĠM arch", + "ĠB ro", + "ĠCh ina", + "Ġ2 9", + "Ġs qu", + "Ġprov ided", + "Ġa verage", + "as ons", + "Ġ201 1", + "Ġex am", + "l in", + "5 5", + "n ed", + "Ġper fect", + "Ġt ou", + "al se", + "u x", + "Ġbu y", + "Ġsh ot", + "Ġcol lect", + "Ġph ot", + "Ġplay ed", + "Ġsur pr", + "Ġofficial s", + "Ġsim ple", + "av y", + "Ġindust ry", + "Ġhand s", + "g round", + "Ġp ull", + "Ġr ound", + "Ġus er", + "Ġr ange", + "u ary", + "Ġpriv ate", + "op s", + "e es", + "Ġw ays", + "ĠM ich", + "Ġve h", + "Ġex cept", + "Ġter ms", + "im um", + "pp er", + "I ON", + "ore s", + "ĠDr agon", + "ou l", + "Ġd en", + "Ġperform ance", + "Ġb ill", + "c il", + "4 7", + "Ġen vironment", + "Ġex c", + "ad d", + "Ġwor th", + "Ġp ict", + "Ġch ance", + "Ġ201 8", + "b or", + "Ġspe ed", + "ict ion", + "Ġal leg", + "ĠJ apan", + "at ory", + "re et", + "Ġm atch", + "ĠI I", + "Ġst ru", + "ord er", + "Ġst e", + "Ġl iving", + "Ġst ruct", + "in o", + "Ġse par", + "her n", + "Ġresp onse", + "Ġen joy", + "Ġv ia", + "A D", + "um ents", + "ace book", + "Ġmem ber", + "ib r", + "iz ing", + "Ġto ol", + "ĠM on", + "ĠWh ile", + "h ood", + "ĠA ng", + "ĠD ef", + "Ġoff er", + "T r", + "a ur", + "Ġturn ed", + "ĠJ uly", + "d own", + "an ced", + "Ġrec ently", + "ĠE ar", + "Ġc e", + "ĠSt ar", + "ĠC ong", + "rough t", + "Ġbl ood", + "Ġhop e", + "Ġcom ment", + "ain t", + "Ġar ri", + "il es", + "Ġpartic ip", + "ough t", + "ri ption", + "0 8", + "4 9", + "Ġg ave", + "Ġse lect", + "Ġkill ed", + "sy ch", + "Ġgo 
es", + "i j", + "Ġc oll", + "Ġimp act", + "at ives", + "ĠS er", + "0 9", + "ĠAug ust", + "Ġb oy", + "d e", + "ĠD es", + "Ġf elt", + "U S", + "Ġexpect ed", + "Ġim age", + "ĠM ark", + "cc ording", + "o ice", + "E C", + "ĠM ag", + "en ed", + "h old", + "ĠP ost", + "Ġpre vent", + "N o", + "Ġinvol ved", + "Ġey es", + "Ġquick ly", + "A t", + "un k", + "Ġbeh av", + "Ġ ur", + "Ġl ed", + "c ome", + "e y", + "Ġcand id", + "Ġear lier", + "Ġfoc us", + "et y", + "P ro", + "led ge", + "ix ed", + "ill ed", + "Ġpop ular", + "A P", + "Ġset t", + "l ight", + "Ġvar ious", + "in ks", + "Ġlevel s", + "Ġro ad", + "ell ig", + "ab les", + "he l", + "itte e", + "ĠG ener", + "y pe", + "Ġhe ard", + "ic les", + "Ġm is", + "Ġus ers", + "ĠS an", + "Ġimpro ve", + "Ġf ather", + "Ġse arch", + "The y", + "v il", + "Ġprof ess", + "Ġkn ew", + "Ġl oss", + "Ġev ents", + "6 5", + "Ġb illion", + "0 7", + "0 2", + "ĠNew s", + "ĠA M", + "Ġco ver", + "w here", + "ens ion", + "Ġb ott", + "Ġare as", + "en ces", + "op e", + "ĠTw itter", + "a el", + "Ġget s", + "ĠGo ogle", + "Ġs n", + "i ant", + "Ġv ote", + "Ġnear ly", + "Ġinclud ed", + "Ġrec ogn", + "z z", + "m m", + "al ed", + "Ġhappen ed", + "0 4", + "Ġh ot", + "Ġwho se", + "Ġc ivil", + "Ġsu ff", + "o es", + "it iz", + "ĠSy ri", + "Ġresp ond", + "Ġh on", + "Ġfeat ures", + "Ġeconom ic", + "ĠApr il", + "r im", + "Ġtechn ology", + "Ġo ption", + "ag ing", + "Ġpur ch", + "R e", + "Ġl at", + "ch ie", + "is l", + "Ġrec omm", + "u f", + "Ġtr aining", + "Ġeffect s", + "Ġf ast", + "Ġ201 0", + "Ġocc ur", + "Ġwebs ite", + "Ġem ail", + "Ġs ens", + "e ch", + "Ġo il", + "Ġinf lu", + "Ġcurrent ly", + "ĠS ch", + "ĠAd d", + "Ġgo al", + "Ġsc ient", + "Ġcon v", + "1 00", + "em y", + "Ġdec ided", + "Ġtra vel", + "Ġm ention", + "L L", + "0 3", + "Ġe lection", + "Ġph one", + "Ġlook s", + "Ġsit uation", + "Ġc y", + "Ġh or", + "b ed", + "ĠCour t", + "a ily", + "av es", + "Ġqu ality", + "ĠCom p", + "w ise", + "Ġt able", + "Ġst aff", + "ĠW ind", + "et t", + "Ġtri ed", + "ide red", + "Ġadd ition", + "Ġb ox", + "Ġl ack", + "ar ily", + "Ġw ide", + "Ġm id", + "Ġbo ard", + "ys is", + "Ġant i", + "h a", + "Ġd ig", + "en ing", + "Ġd ro", + "C on", + "6 8", + "Ġsl ow", + "b ased", + "se qu", + "Ġp ath", + "E x", + "ak er", + "Ġwork ed", + "Ġp en", + "Ġeng ine", + "Ġlook ed", + "ĠSu per", + "ĠS erv", + "Ġvict im", + "U n", + "Ġproper ty", + "Ġint rodu", + "Ġexec ut", + "ĠP M", + "L e", + "Ġcol or", + "ĠM ore", + "Ġ6 0", + "Ġnet work", + "Ġd ate", + "c ul", + "id ge", + "Ġext ra", + "3 1", + "Ġs le", + "6 7", + "Ġw ond", + "Ġreport s", + "j ust", + "ĠAust ral", + "Ġcap ital", + "Ġen s", + "Ġcomm and", + "Ġallow ed", + "Ġpre p", + "Ġca pt", + "h ib", + "Ġnum bers", + "ch an", + "Ġf air", + "m p", + "om s", + "Ġre ach", + "W ith", + "t ain", + "Ġbro ad", + "Ġcou ple", + "ec ause", + "ly ing", + "ĠF eb", + "Ġsc reen", + "Ġl ives", + "Ġpri or", + "ĠCong ress", + "A r", + "Ġappro ach", + "Ġe mer", + "ar ies", + "ĠD is", + "s erv", + "ĠN e", + "Ġbu ilt", + "c ies", + "Ġre pe", + "Ġrul es", + "for ce", + "ĠP al", + "Ġfin ancial", + "Ġcons idered", + "ĠCh ar", + "n ces", + "ĠI S", + "Ġb rought", + "Ġb i", + "i ers", + "ĠS im", + "O P", + "Ġproduct s", + "Ġvis it", + "Ġdoc ument", + "Ġcon duct", + "Ġcomplete ly", + "in ing", + "ĠCal if", + "ib ly", + "Ġwr itten", + "ĠT V", + "em ents", + "Ġd raw", + "O ne", + "Ġpub lished", + "Ġsec ret", + "r ain", + "he t", + "ĠF acebook", + "ond ay", + "ĠU p", + "Ġsex ual", + "Ġth ous", + "ĠP at", + "Ġ ess", + "Ġstand ard", + "Ġar m", + "g es", + "ect ion", + "Ġf ell", + "Ġfore ign", + "an 
i", + "ĠFr iday", + "Ġreg ular", + "in ary", + "Ġincre ased", + "Ġus ually", + "Ġdem on", + "Ġd ark", + "Ġadd itional", + "ro l", + "ĠO f", + "Ġprodu ction", + "! !", + "und red", + "Ġintern ational", + "id ents", + "ĠF ree", + "rou p", + "Ġr ace", + "Ġm ach", + "Ġh uge", + "A ll", + "le ar", + "ove mber", + "Ġto wn", + "Ġatt ention", + "ĠO ff", + "y ond", + "ĠThe n", + "f ield", + "Ġter ror", + "ra z", + "ĠB o", + "Ġmeet ing", + "ĠP ark", + "Ġar rest", + "Ġf ear", + "Ġa w", + "ĠV al", + "or ing", + "' ,", + "Ġext reme", + "ar r", + "Ġwork ers", + "A fter", + "Ġ3 1", + "n et", + "am ent", + "Ġdirect ly", + "Ġpop ulation", + "ub e", + "ĠOct ober", + "ĠI N", + "ĠJan uary", + "5 9", + "ĠDav id", + "Ġc ross", + "ce mber", + "ĠF irst", + "Ġmess age", + "ir it", + "Ġn ation", + "Ġp oll", + "is ions", + "Ġansw er", + "n y", + "is ode", + "Ġcar ry", + "ĠRuss ia", + "Ġhe ar", + "eng th", + "ro y", + "Ġn atural", + "in ally", + "Ġdo g", + "m itted", + "Ġtr ade", + "Ġsub st", + "Ġmult iple", + "ĠAf ric", + "Ġf ans", + "Ġs ort", + "Ġgl obal", + "ic ation", + "ĠW ed", + "ar a", + "Ġa chie", + "Ġlangu age", + "ve y", + "Ġt al", + "Ġnecess ary", + "Ġdet ails", + "Ġs en", + "ĠS und", + "ĠRe g", + "ĠR ec", + "0 6", + "Ġs il", + "ress ive", + "Ġmed ical", + "un ch", + "orn ia", + "Ġu nd", + "f ort", + "oc ks", + "ĠM onday", + "ues day", + "c raft", + "7 7", + "ur t", + "Ġ ver", + "ĠH ill", + "Ġrece ive", + "Ġmor ning", + "es tern", + "Ġb ank", + "Ġs at", + "ir th", + "ĠH igh", + "Ġdev ice", + "ĠTH E", + "ĠCent er", + "Ġsaf e", + "Ġp le", + "ĠCanad a", + "Ġsystem s", + "Ġass ist", + "Ġsur v", + "Ġb attle", + "ĠS oc", + "vert is", + "S he", + "Ġp aper", + "Ġgrow th", + "Ġc ast", + "S c", + "Ġpl ans", + "ll ed", + "Ġpart s", + "Ġw all", + "Ġmove ment", + "Ġpract ice", + "im ately", + "Ġdis play", + "Ġsomet imes", + "om p", + "ĠP aul", + "ĠY es", + "k ing", + "5 8", + "o ly", + "Ġs on", + "Ġav oid", + "ok es", + "ĠJ ew", + "Ġto wards", + "as c", + "Ġ //", + "ĠK ore", + "Ġtalk ing", + "Ġcor rect", + "Ġsp ent", + "ic ks", + "i able", + "e ared", + "Ġter m", + "Ġwant s", + "om ing", + "Ġ ut", + "Ġdou b", + "Ġfor ces", + "Ġp lease", + "6 9", + "ĠN ovember", + "at form", + "ond on", + "Ġon es", + "Ġimmedi ately", + "ĠRuss ian", + "ĠM et", + "Ġde g", + "Ġparent s", + "C H", + "ĠAmeric ans", + "al y", + "ĠM od", + "Ġsh own", + "Ġcond itions", + "Ġst uff", + "Ġre b", + "ĠY our", + "Ġinclud es", + "n own", + "ĠS am", + "Ġexper ien", + "m ission", + "ĠE ven", + "augh t", + "Ġannoun ced", + "ĠRepublic an", + "Ġdeter min", + "Ġdescrib ed", + "ĠCount y", + "( )", + "Ġdo or", + "Ġchang ed", + "Ġne igh", + "ĠH ere", + "Ġcle an", + "Ġp an", + "ĠDe cember", + "ĠEurope an", + "ir ing", + "ap ter", + "Ġcl ub", + "ĠT uesday", + "Ġp aid", + "ĠN et", + "Ġattack s", + "Ġcharact ers", + "Ġal one", + "Ġdirect or", + "d om", + "Ġ3 5", + "Ġl oad", + "Ġr out", + "ĠCalif ornia", + "Ġfin ally", + "Ġr ac", + "Ġcont r", + "Ġexact ly", + "res h", + "p ri", + "ĠIs lam", + "Ġn ature", + "Ġcare er", + "Ġlat est", + "Ġcon vers", + "ĠS l", + "p ose", + "ci ent", + "ĠIn c", + "iv ity", + "8 8", + "ĠA tt", + "ĠM or", + "nes day", + "Ġwe ight", + "k en", + "Ġnot e", + "Ġteam s", + "Ġ \\", + "air s", + "ĠG reen", + "Ġh undred", + "on ent", + "Ġstre ng", + "Ġcons ist", + "ic ated", + "Ġreg ul", + "Ġl ic", + "ast ic", + "Ġt en", + "urs day", + "ellig ence", + "ous ly", + "ĠU K", + "B I", + "Ġcost s", + "Ġind epend", + "ĠA P", + "Ġnorm al", + "Ġh om", + "Ġob vious", + "Ġs we", + "Ġst ar", + "Ġread y", + "ac her", + "Ġimp lement", + "g est", + "Ġs ong", + 
"ĠG et", + "ĠL ab", + "Ġinterest ing", + "us ing", + "Ġg iving", + "ĠSund ay", + "Ġet c", + "Ġm iddle", + "Ġrem ember", + "r ight", + "os ition", + "ut ions", + "Ġm ax", + "4 6", + "Ġyour self", + "Ġdem and", + "Ġtreat ment", + "Ġd anger", + "ĠC ons", + "Ġgu y", + "ĠBrit ish", + "Ġphys ical", + "Ġrel ated", + "Ġrem ain", + "Ġcould n", + "Ġref er", + "Ġc itiz", + "b ox", + "EN T", + "bo ard", + "Ġin n", + "I G", + "er o", + "ĠSt reet", + "osp ital", + "ren ch", + "cher s", + "Ġst ra", + "O L", + "ag er", + "ĠA N", + "Ġeas ily", + "I A", + "en ge", + "in y", + "Ġcl os", + "ock ed", + "Ġus es", + "ĠC oun", + "I m", + "u ild", + "? ?", + "m ore", + "Ġan g", + "Ġwr ite", + "ol ute", + "5 7", + "Ġlead er", + "Ġread ing", + "< /", + "Ġaut om", + "est s", + "4 3", + "Ġleg isl", + "ĠG old", + "Ġdesign ed", + "ĠS T", + "ĠLe g", + "a res", + "Ġbe aut", + "ĠT ex", + "Ġappear s", + "Ġstru gg", + "ĠR om", + "Ġ 00", + "Ġcho ice", + "Ġparticular ly", + "ĠF rom", + "op er", + "ĠL ondon", + "ann ed", + "Ġallow s", + "ob ile", + "Ġdiffere nce", + "âĢ ¢", + "ĠV iew", + "ĠWed nesday", + "Ġal though", + "Ġrel ative", + "Ġapplic ation", + "ate ver", + "Ġare n", + "Ġmy self", + "Ġim ag", + "Ġdis e", + "Ġsoc iety", + "Ġfre qu", + "ĠEng lish", + "Ġpo or", + "ĠD ay", + "Ġwrit ing", + "Ġse ven", + "Ġstart ing", + "Ġb ud", + "Ġpr int", + "ĠTr ans", + "uf act", + "ĠSt ud", + "n ew", + "Ġcr im", + "Ġg ives", + "Ġco ol", + "a e", + "i ance", + "ĠGener al", + "Ġthink ing", + "Ġsa ve", + "Ġlim ited", + "ĠPart y", + "Ġmean ing", + "p en", + "ow ers", + "ĠJ ack", + "E M", + "Ġn ice", + "ru pt", + "Ġg as", + "Ġe ight", + "Ġfe et", + "Ġeff ort", + "Ġ ign", + "ic it", + "B l", + "co in", + "Ġop in", + "Ġbr ain", + "Wh ile", + "he st", + "ĠTh ursday", + "Ġwould n", + "augh ter", + "Ġtou ch", + "le ments", + "Ġstud ies", + "Ġcent er", + "c ont", + "or ge", + "Ġcomput er", + "Ġinvestig ation", + "P l", + "or ks", + "Ġ200 8", + "Ġincre asing", + "Ġst ore", + "Ġcom ments", + "Ġb al", + "m en", + "Ġdo ll", + "Ġl iber", + "Ġw ife", + "Ġlaw s", + "atur day", + "it ness", + "Ġmod ern", + "ĠS k", + "Ġadminist ration", + "Ġopportun ity", + "Ġs al", + "Ġpower ful", + "M y", + "Ġclaim s", + "ĠEar th", + "ord s", + "Ġt itle", + "Ġes c", + "n ame", + "N ot", + "om en", + "Ġbe yond", + "Ġc amer", + "Ġse ll", + "it ute", + "ear ch", + "Ġapp l", + "im ent", + "4 2", + "ĠAr t", + "Ġun f", + "Ġviol ence", + "ur g", + "ĠE ast", + "Ġcomp ared", + "Ġopt ions", + "Ġthrough out", + "Ġv s", + "ig r", + ". 
[", + "ac hes", + "7 8", + "Ġfil es", + "F L", + "E L", + "ar ian", + "ĠJ ames", + "ĠA ir", + "an ch", + "Ġdet ail", + "Ġpie ce", + "P S", + "Ġn amed", + "Ġeduc ation", + "Ġdri ve", + "Ġitem s", + "Ġstud ent", + "ic ed", + ": :", + "ic o", + "Ġth row", + "Ġsc ene", + "Ġcomple x", + "Ġ200 9", + "Ġpre c", + "ĠB re", + "7 9", + "Ġcon cept", + "Ġstat us", + "am ing", + "Ġd ied", + "Ġknow ledge", + "Ġbegin ning", + "O D", + "ru ary", + "Ġcertain ly", + "Ġgu ys", + "Ġsl ight", + "in n", + "ound s", + "Ġf ine", + "Ġf at", + "ic ations", + "Ġper haps", + "ĠA nt", + "Ġinc ome", + "Ġhtt ps", + "Ġmajor ity", + "port s", + "st on", + "Ġgreat er", + "Ġfe ed", + "ent ially", + "Ġsaf ety", + "Ġun ique", + "and om", + "Ġg one", + "Ġshow ed", + "Ġhist or", + "Ġcoun ter", + "i us", + "id a", + "Ġlead ing", + "i pe", + "Ġs end", + "ĠDon ald", + "er ve", + "Ġdef ense", + "ines e", + "Ġy es", + "ĠF ire", + "ĠMus lim", + "ra q", + "Ġcontin ued", + "os h", + "Ġprov ides", + "Ġpr ison", + "ĠP re", + "Ġhapp y", + "Ġeconom y", + "Ġtr ust", + "ag s", + "ĠG ame", + "Ġweap ons", + "um an", + "ĠC le", + "it ation", + "Ġanal ysis", + "ĠT imes", + "Ġsc ience", + "- >", + "Ġfig ure", + "Ġdis app", + "ent y", + "Ġsoft ware", + "Ġu lt", + "Ġoffic ers", + "N ew", + "I s", + "Ġrem ains", + "ĠInd ia", + "Ġp sych", + "ri ef", + "Ġc at", + "es c", + "Ġob serv", + "Ġst age", + "ĠD ark", + "Ġent er", + "ch ange", + "Ġpass ed", + "Ġdes pite", + "ĠO ut", + "Ġmov ie", + "r s", + "Ġv oice", + "m ine", + "ĠPl ay", + "Ġto ward", + "ĠT er", + "Ġreg ion", + "Ġval ues", + "or ters", + "Ġm ount", + "Ġoffic er", + "ĠO ther", + "b an", + "Ġh ous", + "w ood", + "ro om", + "I V", + "ĠS un", + "se e", + "ĠO ver", + "ro g", + "9 0", + "Ġl ay", + "ĠT ur", + "a wn", + "Ġpress ure", + "ĠS ub", + "Ġbook s", + "ed om", + "ĠS and", + "A A", + "ag o", + "Ġre asons", + "f ord", + "Ġactiv ity", + "U T", + "N ow", + "ĠSen ate", + "ce ll", + "n ight", + "Ġcall s", + "in ter", + "Ġlet ter", + "ĠR ob", + "ĠJ e", + "Ġcho ose", + "ĠL aw", + "G et", + "B e", + "Ġro b", + "Ġtyp es", + "Ġpl atform", + "Ġqu arter", + "R A", + "ĠT ime", + "Ġmay be", + "ĠC r", + "9 5", + "p re", + "Ġmov ing", + "Ġl if", + "Ġgo ld", + "Ġs om", + "Ġpat ients", + "Ġtr uth", + "ĠK e", + "ur ance", + "ant ly", + "m ar", + "Ġchar ge", + "ĠG reat", + "Ġce le", + "---------------- ----------------", + "Ġro ck", + "ro id", + "an cy", + "Ġcred it", + "a ud", + "B y", + "ĠE very", + "Ġmov ed", + "ing er", + "rib ution", + "Ġn ames", + "Ġstra ight", + "ĠHe alth", + "ĠW ell", + "Ġfe ature", + "Ġr ule", + "Ġsc he", + "in ated", + "ĠMich ael", + "ber g", + "4 1", + "il ed", + "b and", + "Ġcl ick", + "ĠAng el", + "on ents", + " Ń", + "ĠI raq", + "ĠS aturday", + "Ġa ware", + "p art", + "Ġpat tern", + "O W", + "ĠL et", + "Ġgr ad", + "ign ed", + "Ġassoci ated", + "Ġst yle", + "n o", + "i ation", + "a ith", + "il ies", + "Ġst ories", + "ur ation", + "Ġindividual s", + "ĠâĢ ¦", + "m iss", + "ĠAss oci", + "ish ing", + "ab y", + "Ġsum mer", + "ĠB en", + "Ġ3 2", + "Ġar ch", + "ut y", + "ĠTex as", + "h ol", + "Ġfull y", + "Ġm ill", + "Ġfollow ed", + "ĠB ill", + "ĠInd ian", + "ĠSec ret", + "ĠB el", + "ĠFeb ruary", + "Ġjob s", + "Ġseem ed", + "ĠGo vern", + "i pped", + "Ġreal ity", + "Ġl ines", + "Ġp ark", + "Ġmeas ure", + "ĠO ur", + "I M", + "Ġbro ther", + "Ġgrow ing", + "Ġb an", + "Ġest im", + "Ġc ry", + "ĠS chool", + "Ġme chan", + "ĠO F", + "ĠWind ows", + "Ġr ates", + "ĠO h", + "Ġpos itive", + "Ġcult ure", + "ist ics", + "ic a", + "Ġh ar", + "y a", + "ite ly", + "i pp", + "Ġm ap", + "en cies", + "ĠWill iam", 
+ "I I", + "ak ers", + "5 6", + "ĠM art", + "ĠR em", + "Ġal tern", + "it ude", + "Ġco ach", + "row d", + "D on", + "Ġk ids", + "Ġj ournal", + "Ġcor por", + "Ġf alse", + "Ġwe b", + "Ġsle ep", + "Ġcont ain", + "Ġst o", + "Ġb ed", + "iver se", + "ĠR ich", + "ĠCh inese", + "Ġp un", + "Ġme ant", + "k nown", + "Ġnot ice", + "Ġfavor ite", + "a ven", + "Ġcond ition", + "Ġpur pose", + ") )", + "Ġorgan ization", + "Ġchall eng", + "Ġman ufact", + "Ġsus p", + "ĠA c", + "Ġcrit ic", + "un es", + "uc lear", + "Ġm er", + "vent ion", + "Ġ8 0", + "Ġm ist", + "ĠU s", + "ĠT or", + "htt p", + "ol f", + "Ġlarg er", + "Ġadv ant", + "Ġrese ar", + "Ġact ions", + "m l", + "Ġke pt", + "Ġa im", + ", '", + "c ol", + "Ġbenef its", + "if ying", + "Ġact ual", + "ĠIntern ational", + "Ġveh icle", + "Ġch ief", + "Ġeff orts", + "ĠLe ague", + "ĠM ost", + "Ġwa it", + "Ġad ult", + "Ġover all", + "Ġspe ech", + "Ġhigh ly", + "Ġfem ale", + "Ġer ror", + "Ġeffect ive", + "5 4", + "Ġenc our", + "w ell", + "Ġfail ed", + "Ġcons erv", + "Ġprogram s", + "Ġt rou", + "Ġa head", + "5 00", + "vertis ement", + "I P", + "ĠF ound", + "p ir", + "Ġ %", + "Ġcr ime", + "and er", + "Ġloc ation", + "ĠI ran", + "Ġbehav ior", + "az ing", + "Ġr are", + "Ġem b", + "Ġca used", + "Ġsh ip", + "Ġact ive", + "Ġcont ribut", + "Ġg reen", + "Ġac qu", + "Ġref lect", + "ven ue", + "Ġf irm", + "Ġb irth", + "] .", + "Ġclear ly", + "Ġem ot", + "Ġag ency", + "ri age", + "Ġmem ory", + "9 8", + "S A", + "ĠSe e", + "ac ing", + "C C", + "Ġbig gest", + "Ġr ap", + "Ġbas ic", + "Ġb and", + "e at", + "Ġsus pect", + "ĠM ac", + "Ġ9 0", + "m ark", + "ist an", + "Ġsp read", + "am s", + "k i", + "as y", + "ra v", + "ĠR ober", + "Ġdemon str", + "r ated", + "Ġabs olute", + "Ġpl aces", + "Ġim pl", + "ibr ary", + "Ġc ards", + "Ġdest roy", + "Ġv irt", + "ve re", + "Ġapp eared", + "y an", + "p oint", + "Ġbe g", + "Ġtem per", + "s pe", + "ant ed", + "ear s", + "ĠD irect", + "Ġl ength", + "Ġbl og", + "am b", + "Ġint eg", + "Ġres ources", + "ac c", + "if ul", + "Ġsp ot", + "Ġfor ced", + "Ġthous ands", + "ĠMin ister", + "Ġqu al", + "ĠF rench", + "at ically", + "Ġgener ally", + "Ġdr ink", + "Ġth us", + "I L", + "od es", + "Ġappro pri", + "ĠRe ad", + "Ġwh om", + "Ġey e", + "Ġcol lege", + "Ġ4 5", + "ire ction", + "Ġens ure", + "Ġapp arent", + "id ers", + "Ġrelig ious", + "Ġmin or", + "ol ic", + "Ġt ro", + "ĠWh y", + "rib ute", + "m et", + "Ġprim ary", + "Ġdevelop ed", + "Ġpe ace", + "Ġsk in", + "st e", + "av a", + "Ġbl ue", + "Ġfam ilies", + "Ġ ir", + "Ġapp ly", + "Ġin form", + "ĠSm ith", + "C T", + "i i", + "Ġlim it", + "Ġres ist", + "........ 
........", + "um n", + "Ġconf lic", + "Ġtw e", + "ud d", + "ĠT om", + "Ġl iter", + "qu e", + "b on", + "Ġha ir", + "Ġevent ually", + "Ġp us", + "Ġhelp ed", + "Ġag g", + "or ney", + "ĠApp le", + "Ġf it", + "ĠS ur", + "Ġpre m", + "Ġs ales", + "Ġsecond s", + "Ġstreng th", + "Ġfeel ing", + "¿ ½", + "Ġt our", + "Ġknow s", + "o om", + "Ġex erc", + "Ġsom ew", + "ï ¿½", + "> >", + "Ġsp okes", + "Ġide as", + "Ġreg ist", + "so ft", + "ĠD el", + "ĠP C", + "Ġpro pos", + "Ġlaun ch", + "Ġbott om", + "T H", + "ĠP lease", + "v est", + "it z", + "ĠIn ter", + "Ġsc ript", + "Ġr at", + "ar ning", + "Ġ il", + "ĠJ er", + "ĠA re", + "Ġwh atever", + "ok en", + "ci ence", + "Ġmod e", + "Ġag ree", + "Ġs ources", + "Ġinit ial", + "Ġrest rict", + "Ġwond er", + "us ion", + "## ##", + "ĠS il", + "vil le", + "Ġb urn", + "t w", + "as ion", + "Ġ £", + "Ġn or", + "u ing", + "Ġre ached", + "Ġs un", + "Ġc ateg", + "ig ration", + "Ġc ook", + "Ġprom ot", + "Ġm ale", + "Ġcl imate", + "Ġf ix", + "Ġalleg ed", + "U R", + "all ed", + "Ġim ages", + "C ont", + "ot a", + "Ġschool s", + "i os", + "Ġd rop", + "Ġst ream", + "ĠM o", + "Ġprevious ly", + "al ing", + "Ġp et", + "Ġdou ble", + "Ġ( @", + "ann el", + "Ġdef ault", + "t ies", + "Ġr ank", + "ĠD ec", + "ĠCoun cil", + "Ġweap on", + "Ġst ock", + "Ġanal y", + "ĠSt r", + "Ġpict ure", + "ĠPol ice", + "f erence", + "Ġcent ury", + "Ġcitiz ens", + "Ġon to", + "Ġexp and", + "Ġhe ro", + "ĠS ol", + "Ġw ild", + "Ġupd ate", + "Ġcustom ers", + "r ont", + "d ef", + "Ġl ik", + "Ġcrim inal", + "ĠChrist ian", + "S P", + "7 6", + "Ġle aving", + "Ġother wise", + "ĠD ist", + "Ġbas is", + "5 2", + "5 3", + "ic ip", + "ĠB er", + "Ġrecomm end", + "Ġfl oor", + "Ġc rowd", + "ol es", + "Ġ7 0", + "Ġcent ral", + "ĠE v", + "Ġd ream", + "Ġdown load", + "Ġconf ir", + "ĠTh om", + "Ġwind ow", + "Ġhapp ens", + "Ġun it", + "Ġt end", + "Ġs pl", + "Ġbec omes", + "Ġfight ing", + "Ġpred ict", + "ĠP ress", + "ĠP ower", + "Ġhe avy", + "ak ed", + "Ġf an", + "or ter", + "ate gy", + "B A", + "iz es", + "Ġsp end", + "H ere", + "Ġ200 7", + "Ġad op", + "ĠH am", + "Ġfoot ball", + "ĠP ort", + "od ay", + "5 1", + "amp ions", + "Ġtrans fer", + "h t", + "Ġ3 8", + "ter m", + "ac ity", + "Ġb ur", + "] ,", + "tern al", + "r ig", + "b ut", + "Ġthere fore", + "ĠB ecause", + "res p", + "re y", + "Ġm ission", + "S ome", + "Ġnot ed", + "Ġass um", + "Ġdise ase", + "Ġed it", + "Ġprog ress", + "r d", + "ĠB rown", + "oc al", + "Ġadd ing", + "Ġra ised", + "ĠAn y", + "Ġt ick", + "Ġsee ing", + "ĠPe ople", + "Ġagre ement", + "Ġser ver", + "Ġw at", + "Ġdeb ate", + "Ġsupp osed", + "il ing", + "Ġlarg est", + "Ġsuccess ful", + "ĠP ri", + "ĠDemocr atic", + "Ġj ump", + "ĠSyri a", + "Ġown ers", + "Ġoff ers", + "Ġshoot ing", + "Ġeff ic", + "se y", + "Ġha ven", + "ver se", + "te red", + "ĠL ight", + "im al", + "ĠB ig", + "Ġdef end", + "Ġbe at", + "Ġrecord s", + "% )", + "Ġsc en", + "Ġemploy ees", + "Ġdev ices", + "he m", + "Ġcom mer", + "ĠM ex", + "Ġbenef it", + "ĠPro f", + "Ġil leg", + "Ġsur face", + "ĠAl so", + "Ġh arm", + "ing ly", + "w ide", + "ĠA lex", + "Ġsh ut", + "ĠC ur", + "Ġl ose", + "p m", + "Ġchall enge", + "se mb", + "Ġst ation", + "Ġint elligence", + "Ġacc ur", + "ĠFl or", + "Ġrequ ires", + "ĠM al", + "b um", + "Ġh ospital", + "Ġsp irit", + "Ġoff ered", + "Ġprodu ce", + "ĠComm un", + "Ġcreat ing", + "Ġcr is", + "s pect", + "Ġend ed", + "Ġd aily", + "Ġvot ers", + "land s", + "i as", + "i h", + "on a", + "Ġsm art", + "ĠOff ice", + "ĠL ord", + "ri al", + "ĠIntern et", + "Ġcirc um", + "Ġextreme ly", + "' .", + "Ġopin ion", + "ĠM il", + "Ġg ain", + 
"B S", + "ĠF in", + "y p", + "Ġuse ful", + "Ġbud get", + "Ġcom fort", + "is f", + "Ġback ground", + "el ine", + "Ġep isode", + "Ġen emy", + "Ġtri al", + "Ġestab lish", + "d ate", + "ĠC ap", + "Ġcontin ues", + "Ġshow ing", + "ĠUn ion", + "w ith", + "Ġpost ed", + "ĠSy stem", + "Ġe at", + "ri an", + "Ġr ise", + "ĠGerman y", + "il s", + "Ġsign ed", + "Ġv ill", + "Ġgr and", + "m or", + "ĠEng land", + "Ġproject s", + "um ber", + "Ġconf erence", + "z a", + "Ġrespons ible", + "ĠAr ab", + "Ġlearn ed", + "âĢĶ âĢĶ", + "i pping", + "ĠGe orge", + "O C", + "Ġreturn ed", + "ĠAustral ia", + "Ġb rief", + "Q u", + "Ġbr and", + "ill ing", + "ab led", + "Ġhig hest", + "Ġtr ain", + "ĠComm ission", + "wh ile", + "Ġn om", + "cept ion", + "Ġm ut", + "ĠBl ue", + "Ġinc ident", + "v ant", + "8 6", + "ĠI D", + "Ġn uclear", + "7 4", + "ĠL ike", + "ĠR E", + "ĠM icro", + "l i", + "m ail", + "Ġcharg es", + "8 9", + "Ġad just", + "ad o", + "Ġear th", + "N A", + "Ġpr ices", + "P A", + "Ġd raft", + "Ġrun s", + "Ġcandid ate", + "ens es", + "Ġmanag ement", + "ĠPh il", + "ĠM iss", + "Ġte ach", + "g ram", + "Ġunderstand ing", + "a it", + "ic ago", + "A dd", + "ĠE p", + "sec ut", + "Ġsepar ate", + "Ġinst ance", + "Ġe th", + "Ġun less", + "**** ****", + "ĠF ore", + "in ate", + "Ġoper ations", + "S p", + "Ġf aith", + "g ar", + "ĠCh urch", + "ron ic", + "Ġconf ig", + "os ure", + "Ġactiv ities", + "Ġtrad itional", + "Ġ3 6", + "Ġd irection", + "Ġmach ine", + "Ġsur round", + "Ġp ush", + "un ction", + "ĠE U", + "Ġeas ier", + "Ġarg ument", + "G B", + "Ġm icro", + "Ġsp ending", + "iz ations", + "Ġthe ory", + "ad ow", + "Ġcall ing", + "ĠL ast", + "Ġd er", + "Ġinflu ence", + "Ġcomm it", + "Ġph oto", + "Ġun c", + "ist ry", + "g n", + "ast e", + "ack s", + "Ġdis p", + "ad y", + "d o", + "ĠG ood", + "Ġ `", + "Ġw ish", + "Ġreve aled", + "Âł Âł", + "l ig", + "Ġen force", + "ĠComm ittee", + "Ġche m", + "Ġmil es", + "Ġinterest ed", + "Ġsol ution", + "ic y", + "in ct", + "Ġ- >", + "ĠD et", + "Ġrem oved", + "Ġcomp ar", + "e ah", + "Ġpl ant", + "ĠS ince", + "Ġachie ve", + "Ġadvant age", + "Ġslight ly", + "b ing", + "Ġpl aced", + "u nder", + "201 5", + "ĠM ad", + "Ġt im", + "os es", + "Ġc ru", + "ĠR ock", + "Ġmost ly", + "Ġneg ative", + "Ġset ting", + "Ġprodu ced", + "Ġm ur", + "Ġconnect ion", + "ĠM er", + "Ġdri ver", + "Ġexecut ive", + "Ġass ault", + "Ġb orn", + "ĠV er", + "t ained", + "Ġstruct ure", + "Ġredu ce", + "Ġdec ades", + "Ġd ed", + "u ke", + "ĠM any", + "idd en", + "Ġle ague", + "S e", + "Ġjo in", + "Ġdis co", + "Ġd ie", + "c ks", + "act ions", + "Ġass ess", + "ag n", + "Ġgo als", + "our s", + "I R", + "Ġsen ior", + "ill er", + "m od", + "ip ment", + "oc ol", + "u y", + "ĠQ ue", + "Ġpart ies", + "ir gin", + "Ġle arning", + "it able", + "Ġstre et", + "Ġcamer a", + "A pp", + "Ġsk ills", + "b re", + "c ious", + "Ġcele br", + "ĠFr anc", + "Ġexist ing", + "Ġwill ing", + "l or", + "Ġ id", + "ĠSp ace", + "Ġcrit ical", + "ĠL a", + "ortun ately", + "Ġser ve", + "Ġc old", + "Ġspec ies", + "T S", + "Ġanim als", + "ĠB ay", + "Ġold er", + "ĠU nder", + "est ic", + "ĠT re", + "Ġte acher", + "Ġpre fer", + "v is", + "Ġth read", + "ĠM att", + "Ġmanag er", + "ãĥ »", + "Ġprofess ional", + "ĠV ol", + "Ġnot es", + "The se", + "ul a", + "Ġf resh", + "ent ed", + "u zz", + "ed y", + "clus ion", + "ĠR el", + "Ġdoub t", + "E O", + "Ġopen ed", + "ĠB it", + "Ad vertisement", + "Ġgu ess", + "ĠU N", + "Ġse qu", + "Ġexpl ain", + "ott en", + "Ġatt ract", + "ak s", + "Ġstr ing", + "Ġcont ext", + "oss ible", + "ĠRepublic ans", + "Ġsol id", + "Ġc ities", + "Ġask ing", + "Ġr 
andom", + "u ps", + "ur ies", + "ar ant", + "dd en", + "g l", + "ĠFlor ida", + "Ġdep end", + "ĠSc ott", + "Ġ3 3", + "Ġi T", + "ic on", + "Ġmention ed", + "Ġ2 000", + "Ġclaim ed", + "Ġdefin itely", + "ul f", + "Ġc ore", + "Ġopen ing", + "ĠCon st", + "wh ich", + "ĠT ra", + "A G", + "7 2", + "Ġbelie ved", + "ad a", + "Ġ4 8", + "ĠSec urity", + "yr ight", + "ĠP et", + "ĠL ou", + "Ġhold ing", + "======== ========", + "Ġ ice", + "Ġb row", + "Ġauthor ities", + "h ost", + "w ord", + "Ġsc ore", + "ĠD iv", + "Ġcell s", + "Ġtrans l", + "Ġneigh bor", + "Ġrem ove", + "u ct", + "Ġdist rict", + "ĠA ccording", + "Ġwor se", + "Ġconcern s", + "Ġpresident ial", + "Ġpolic ies", + "ĠH all", + "7 3", + "Ġh us", + "A Y", + "Ġ200 6", + "ĠJ ud", + "Ġindepend ent", + "ĠJust ice", + "ili ar", + "pr int", + "igh ter", + "Ġprotect ion", + "z en", + "Ġsu dden", + "h ouse", + "ĠJ es", + "P R", + "ĠIn f", + "Ġb ul", + "Ġ _", + "ĠServ ice", + "ĠP R", + "Ġstr ategy", + "ff ect", + "Ġgirl s", + "Ġmiss ing", + "oy al", + "ĠTe am", + "ul ated", + "Ġd at", + "Ġpolit ics", + "ab or", + "A ccording", + "Ġspe ll", + "Ġg raph", + "ort hern", + "T C", + "A b", + "Ġlab or", + "is her", + "Ġk ick", + "ĠiT unes", + "Ġstep s", + "pos es", + "Ġsmall er", + "E n", + "ber t", + "Ġro ll", + "Ġresear chers", + "Ġcl osed", + "Ġtrans port", + "Ġlaw y", + "________ ________", + "ĠCh icago", + "Ġas pect", + "Ġn one", + "Ġmar riage", + "9 6", + "Ġe lements", + "ĠF re", + "ĠS al", + "Ġd ram", + "F C", + "t op", + "e qu", + "Ġhe aring", + "Ġsupport ed", + "Ġtest ing", + "co hol", + "Ġmass ive", + "Ġst ick", + "Ġgu ard", + "is co", + "ph one", + "F rom", + "How ever", + "Ġb order", + "Ġcop y", + "ograph y", + "l ist", + "7 1", + "Ġown er", + "cl ass", + "ru it", + "r ate", + "ĠO nce", + "Ġdig ital", + "Ġt ask", + "ER S", + "Ġinc red", + "t es", + "+ +", + "ĠFr ance", + "Ġb reat", + "ow l", + "Ġiss ued", + "ĠW estern", + "Ġdet ect", + "Ġpart ners", + "Ġsh ared", + "ĠC all", + "Ġcan cer", + "ac he", + "rib e", + "Ġexpl ained", + "Ġhe at", + "{ \"", + "Ġinvest ment", + "ĠB ook", + "Ġw ood", + "Ġtool s", + "ĠAl though", + "Ġbelie f", + "Ġcris is", + "Ġg e", + "ĠM P", + "Ġoper ation", + "ty pe", + "~ ~", + "g a", + "Ġcont ains", + "ant a", + "Ġexp ress", + "ĠG roup", + "ĠJ ournal", + "k a", + "Ġam b", + "ĠUS A", + "Ġfind ing", + "Ġfund ing", + "h ow", + "Ġestab lished", + "ide os", + "Ġdeg ree", + "Ġdanger ous", + "ang ing", + "Ġfre edom", + "pp ort", + "out hern", + "Ġch urch", + "Ġc atch", + "ĠTw o", + "Ġpres ence", + "ĠGu ard", + "U p", + "Ġauthor ity", + "ĠPro ject", + "Ġbut ton", + "Ġcon sequ", + "Ġval id", + "Ġwe ak", + "Ġstart s", + "Ġref erence", + "ĠM em", + "\" )", + "U N", + "or age", + "ĠO pen", + "Ġcol lection", + "y m", + "g ency", + "Ġbeaut iful", + "ro s", + "Ġtell s", + "Ġwa iting", + "n el", + "Ġprov iding", + "ĠDemocr ats", + "Ġd aughter", + "Ġm aster", + "Ġpur poses", + "ĠJapan ese", + "Ġequ al", + "Ġturn s", + "Ġdoc uments", + "Ġwatch ing", + "R es", + "Ġr an", + "201 4", + "Ġre ject", + "ĠKore a", + "Ġvictim s", + "Le vel", + "ere nces", + "Ġw itness", + "Ġ3 4", + "Ġre form", + "com ing", + "Ġocc up", + "Ġc aught", + "Ġtra ffic", + "ad ing", + "Ġmod els", + "ar io", + "Ġserv ed", + "Ġb atter", + "u ate", + "ĠSecret ary", + "Ġagre ed", + "Ġtr uly", + "yn am", + "ĠR et", + "Ġun its", + "ĠRes earch", + "h and", + "az ine", + "ĠM ike", + "Ġvar iety", + "ot al", + "Ġam azing", + "Ġconfir med", + "Ġentire ly", + "Ġpurch ase", + "Ġe lement", + "Ġc ash", + "Ġdeter mine", + "D e", + "Ġc ars", + "ĠW all", + "â ĸ", + "Ġview s", + "Ġdrug s", + 
"Ġdep artment", + "ĠSt ep", + "u it", + "Ġ3 9", + "as ure", + "ĠCl ass", + "Ġc overed", + "ĠB ank", + "Ġme re", + "u ana", + "Ġmult i", + "Ġm ix", + "Ġun like", + "lev ision", + "Ġsto pped", + "Ġs em", + "ĠG al", + "ul es", + "Ġwe l", + "ĠJohn son", + "l a", + "Ġsk ill", + "Ġbec oming", + "ri e", + "Ġappropri ate", + "f e", + "ell ow", + "ĠPro t", + "ul ate", + "oc ation", + "Ġweek end", + "od ies", + "Ġsit es", + "Ġanim al", + "ĠT im", + "Ġsc ale", + "Ġcharg ed", + "Ġinst ruct", + "ill a", + "Ġmethod s", + "Ġc ert", + "Ġjud ge", + "ĠH el", + "Ġdoll ars", + "Ġstand ing", + "ĠS qu", + "Ġdeb t", + "l iam", + "Ġdri ving", + "ĠS um", + "ĠEd ition", + "Ġal bum", + "and on", + "I F", + "ĠU k", + "6 3", + "ad er", + "Ġcommer cial", + "es h", + "ĠGovern ment", + "Ġdisc overed", + "Ġout put", + "ĠHill ary", + "ĠCar ol", + "Ġ200 5", + "Ġab use", + "anc ing", + "Ġsw itch", + "Ġann ual", + "T w", + "Ġst ated", + "ag ement", + "in ner", + "Ġdem ocr", + "Ġres idents", + "Ġallow ing", + "Ġfact ors", + "od d", + "Ġf uck", + "em ies", + "Ġoccur red", + "ot i", + "Ġn orth", + "ĠP ublic", + "Ġinj ury", + "Ġins urance", + "C L", + "oll y", + "ã Ģ", + "Ġrepe ated", + "Ġar ms", + "ang ed", + "Ġconst ruction", + "Ġf le", + "P U", + "ic ians", + "Ġfor ms", + "ĠMc C", + "ant ic", + "Ġm ental", + "p ire", + "Ġequ ipment", + "Ġf ant", + "Ġdiscuss ion", + "Ġregard ing", + "k in", + "ar p", + "Ġch air", + "og ue", + "Ġpro ceed", + "ĠI d", + "O ur", + "Ġmur der", + "M an", + "Ġ4 9", + "as p", + "Ġsupp ly", + "Ġin put", + "Ġwe alth", + "liam ent", + "Ġpro ced", + "or ial", + "ĠSt at", + "ĠN FL", + "hen s", + "ĠInst itute", + "Ġput ting", + "ourn ament", + "et ic", + "Ġloc ated", + "Ġk id", + "er ia", + "r un", + "Ġpr inc", + "Ġ !", + "go ing", + "ĠB et", + "Ġcl ot", + "Ġtell ing", + "Ġprop osed", + "i ot", + "or ry", + "Ġfund s", + "g ment", + "ĠL ife", + "Ġb aby", + "ĠB ack", + "Ġsp oke", + "Im age", + "Ġear n", + "ĠA T", + "g u", + "Ġex change", + "ĠL in", + "ov ing", + "Ġp air", + "M ore", + "az on", + "Ġarrest ed", + "Ġkill ing", + "c an", + "ĠC ard", + "y d", + "Ġident ified", + "Ġm obile", + "Ġthan ks", + "ony m", + "ĠF orm", + "Ġhundred s", + "ĠCh ris", + "ĠC at", + "Ġtre nd", + "h at", + "ĠA v", + "om an", + "Ġelect ric", + "ĠW il", + "S E", + "O f", + "Ġrest aur", + "ot ed", + "Ġtr ig", + "Ġn ine", + "Ġb omb", + "Wh y", + " ¯", + "Ġco verage", + "Ġapp eal", + "ĠRober t", + "ĠS up", + "Ġfin ished", + "Ġfl ow", + "Ġdel iver", + "Ġcal cul", + "Ġphot os", + "Ġph il", + "Ġpie ces", + "Ġapp re", + "k es", + "Ġr ough", + "D o", + "Ġpart ner", + "Ġconcern ed", + "Ġ3 7", + "ĠG en", + "C ol", + "ct ors", + "Ġ= >", + "st ate", + "Ġsuggest ed", + "ĠFor ce", + "C E", + "Ġher self", + "ĠPl an", + "w orks", + "o oth", + "ren cy", + "Ġcor ner", + "Ġhus band", + "Ġintern et", + "ĠA ut", + "em s", + "os en", + "ĠAt l", + "g en", + "Ġbal ance", + "6 2", + "Ġsound s", + "te xt", + "Ġar r", + "ov es", + "Ġmill ions", + "Ġrad io", + "Ġsat isf", + "ĠD am", + "M r", + "G o", + "S pe", + "Ġcomb at", + "r ant", + "ĠG ree", + "Ġf uel", + "Ġdist ance", + "Ġtest s", + "Ġdec re", + "ĠE r", + "Ġman aged", + "D S", + "Ġt it", + "Ġmeas ures", + "ĠL iber", + "Ġatt end", + "as hed", + "ĠJ ose", + "ĠN ight", + "d it", + "ĠN ov", + "ĠE nd", + "out s", + "Ġgener ation", + "Ġadv oc", + "y th", + "Ġconvers ation", + "ĠS ky", + "act ive", + "ce l", + "ri er", + "ĠFr ank", + "Ġg ender", + "Ġcon cent", + "Ġcar ried", + "and a", + "ĠV irgin", + "Ġarri ved", + "ic ide", + "ad ed", + "Ġfail ure", + "Ġmin imum", + "le ts", + "Ġwor st", + "Ġkeep ing", + 
"Ġint ended", + "Ġilleg al", + "Ġsub sc", + "Ġdetermin ed", + "Ġtri p", + "Y es", + "Ġra ise", + "Ġ ~", + "Ġfeel s", + "Ġpack age", + "ĠJ o", + "h i", + "201 6", + "re al", + "Ġf ra", + "Ġsy mb", + "M e", + "uck y", + "p ret", + "ĠK h", + "ĠEd it", + "ĠWe b", + "em ic", + "ĠCol or", + "Ġjust ice", + "I nt", + "Ġfar m", + "ck now", + "\" >", + "el ess", + "Ġredu ced", + "Ġ5 00", + "x x", + "ĠR ad", + "ĠW ood", + "Ġcl in", + "Ġhy p", + "il er", + "ur a", + "k ins", + "8 5", + "6 1", + "ĠThe ir", + "ĠM ary", + "Ġs an", + "Ġno vel", + "ĠWh o", + "Ġcap acity", + "Ġimp ossible", + "Ġpl ays", + "Ġmin ister", + "ij uana", + "ic ate", + "ĠS et", + "Ġf ram", + "Ġ ing", + "Ġcommun ities", + "ĠF BI", + "it a", + "Ġb on", + "Ġstr ateg", + "Ġinterest s", + "l ock", + "g ers", + "m as", + "ĠAN D", + "Ġconflic t", + "Ġrequire ments", + "Ġs ac", + "Ġoper ating", + "in i", + "rel ated", + "Ġcomm itted", + "Ġrelative ly", + "Ġs outh", + "¯ ¯", + "Ġaff ord", + "Ġident ity", + "Ġdec isions", + "Ġacc used", + "pl ace", + "Ġvict ory", + "o ch", + "i at", + "N ame", + "C om", + "t ion", + "ed s", + "Ġsee k", + "Ġt ight", + "ĠIm ages", + "Ġinit i", + "Ġhum ans", + "Ġfam iliar", + "Ġaud ience", + "Ġintern al", + "vent ure", + "Ġs ides", + "ĠT O", + "Ġd im", + "Ġcon clud", + "Ġapp oint", + "Ġenforce ment", + "ĠJ im", + "ĠAssoci ation", + "Ġcircum st", + "ĠCanad ian", + "Ġjo ined", + "Ġdiffere nces", + "ĠL os", + "Ġprot est", + "Ġtw ice", + "w in", + "Ġgl ass", + "ars h", + "ĠAr my", + "Ġexp ression", + "Ġdec ide", + "Ġplan ning", + "an ia", + "Ġhand le", + "ĠMicro soft", + "ĠN or", + "Ġmax imum", + "ĠRe v", + "Ġse a", + "Ġev al", + "Ġhel ps", + "re f", + "Ġb ound", + "Ġm outh", + "Ġstand ards", + "Ġcl im", + "ĠC amp", + "ĠF ox", + "cl es", + "Ġar my", + "ĠTe chn", + "ack ing", + "x y", + "S S", + "Ġ4 2", + "Ġbu g", + "ĠUk rain", + "ĠM ax", + "ĠJ ones", + "ĠSh ow", + "l o", + "Ġplan et", + "Ġ7 5", + "Ġwin ning", + "Ġf aster", + "Ġspe ct", + "Ġbro ken", + "T R", + "Ġdef ined", + "Ġhealth y", + "Ġcompet ition", + "htt ps", + "ĠIs land", + "ĠF e", + "Ġannoun ce", + "ĠC up", + "ĠInst ead", + "Ġcl ient", + "Ġposs ibly", + "se ction", + "ock et", + "l ook", + "Ġfin ish", + "Ġcre w", + "Ġres erv", + "Ġed itor", + "Ġh ate", + "Ġs ale", + "Ġcontro vers", + "Ġp ages", + "w ing", + "Ġnum er", + "Ġopp osition", + "Ġ200 4", + "Ġref uge", + "Ġfl ight", + "Ġap art", + "ĠL at", + "A meric", + "ĠAfric a", + "Ġapplic ations", + "ĠPal est", + "ĠB ur", + "Ġg ar", + "ĠSoc ial", + "Ġup gr", + "Ġsh ape", + "Ġspe aking", + "ans ion", + "a o", + "ĠS n", + "Ġwor ry", + "ĠBrit ain", + "P lease", + "rou d", + "Ġh un", + "Ġintrodu ced", + "Ġd iet", + "I nd", + "ĠSec ond", + "Ġfun ctions", + "ut s", + "ĠE ach", + "ĠJe ff", + "Ġst ress", + "Ġaccount s", + "Ġgu arant", + "ĠAn n", + "ed ia", + "Ġhon est", + "Ġt ree", + "ĠAfric an", + "ĠB ush", + "} ,", + "Ġs ch", + "ĠOn ly", + "Ġf if", + "ig an", + "Ġexerc ise", + "ĠEx p", + "Ġscient ists", + "Ġlegisl ation", + "ĠW ork", + "ĠS pr", + "à Ĥ", + "ĠH uman", + "Ġ è", + "Ġsur vey", + "Ġr ich", + "ri p", + "Ġmain tain", + "Ġfl o", + "Ġleaders hip", + "st ream", + "ĠIslam ic", + "Ġ 01", + "ĠCol lege", + "Ġmag ic", + "ĠPr ime", + "Ġfig ures", + "201 7", + "ind er", + "x ual", + "ĠDe ad", + "Ġabsolute ly", + "Ġfour th", + "Ġpresent ed", + "resp ond", + "rib le", + "Ġal cohol", + "at o", + "ĠD E", + "por ary", + "Ġgr ab", + "Ġvar i", + "Ġqu ant", + "ĠPh oto", + "Ġpl us", + "r ick", + "ar ks", + "Ġaltern ative", + "Ġp il", + "Ġappro x", + "th at", + "Ġobject s", + "ĠR o", + "ĠAnd roid", + "Ġsignificant ly", + 
"ĠR oad", + "k ay", + "R ead", + "av or", + "Ġa cknow", + "ĠH D", + "ĠS ing", + "O r", + "ĠM ont", + "Ġun s", + "pro f", + "Ġneg oti", + "ĠAr ch", + "ik i", + "Ġte levision", + "ĠJew ish", + "Ġcomm ittee", + "Ġmot or", + "Ġappear ance", + "Ġs itting", + "Ġstri ke", + "ĠD own", + "com p", + "ĠH ist", + "Ġf old", + "ac ement", + "ĠLou is", + "Ġbel ong", + "ĠâĢ ¢", + "Ġm ort", + "Ġprep ared", + "Ġ6 4", + "ĠM aster", + "Ġind eed", + "ĠD en", + "Ġre nt", + "T A", + "our ney", + "ar c", + "S u", + "9 7", + "Ġadv ice", + "Ġchang ing", + "Ġlist ed", + "Ġlaun ched", + "is ation", + "ĠP eter", + "is hes", + "Ġl ived", + "ĠM el", + "ĠSup reme", + "ĠF ederal", + "Ġ) ;", + "ruct ure", + "Ġset s", + "Ġphil os", + "u ous", + "Ġ ł", + "Ġappl ied", + "ĠN OT", + "Ġhous ing", + "ĠM ount", + "Ġo dd", + "Ġsu st", + "D A", + "ffic ient", + "Ġ ?", + "ol ved", + "Ġp owers", + "Ġth r", + "Ġrem aining", + "ĠW ater", + "L C", + "Ġca uses", + "ãģ ®", + "Ġman ner", + "ad s", + "Ġsuggest s", + "Ġend s", + "stand ing", + "f ig", + "ĠD un", + "id th", + "Ġg ay", + "Ġter min", + "ĠAngel es", + "M S", + "Ġscient ific", + "Ġco al", + "ap ers", + "b ar", + "ĠThom as", + "Ġsy m", + "ĠR un", + "th is", + "P C", + "igr ants", + "Ġmin ute", + "ĠDist rict", + "cell ent", + "Ġle aves", + "Ġcomple ted", + "am in", + "Ġfoc used", + "Ġmon itor", + "Ġveh icles", + "M A", + "ĠM ass", + "ĠGr and", + "Ġaffect ed", + "itution al", + "Ġconst ruct", + "Ġfollow s", + "Ġt on", + "re ens", + "Ġh omes", + "ĠE xt", + "ĠLe vel", + "r ast", + "ĠI r", + "Ġel im", + "Ġlarge ly", + "ĠJ oe", + "Ġvot es", + "all s", + "Ġbusiness es", + "ĠFound ation", + "ĠCent ral", + "Ġy ards", + "Ġmaterial s", + "ul ner", + "Ġgu ide", + "Ġclos er", + "um s", + "Ġsp orts", + "ed er", + "J ust", + "Ġtax es", + "8 4", + "ĠO ld", + "Ġdec ade", + "ol a", + "Ġv ir", + "Ġdro pped", + "Ġdel ay", + "it ect", + "Ġsec ure", + "ste in", + "le vel", + "Ġtre ated", + "Ġfil ed", + "ain e", + "Ġv an", + "Ġm ir", + "Ġcol umn", + "ict ed", + "e per", + "Ġro t", + "Ġcons ult", + "Ġent ry", + "Ġmar ijuana", + "ĠD ou", + "Ġapparent ly", + "ok ing", + "clus ive", + "Ġincre ases", + "an o", + "Ġspecific ally", + "Ġte le", + "ens ions", + "Ġrelig ion", + "ab ilities", + "Ġfr ame", + "ĠN ote", + "ĠLe e", + "Ġhelp ing", + "Ġed ge", + "ost on", + "Ġorgan izations", + "à ĥ", + "ĠB oth", + "hip s", + "Ġbig ger", + "Ġbo ost", + "ĠSt and", + "Ġro w", + "ul s", + "ab ase", + "Ġr id", + "L et", + "are n", + "ra ve", + "Ġst ret", + "P D", + "Ġv ision", + "Ġwe aring", + "Ġappre ci", + "Ġa ward", + "ĠU se", + "Ġfact or", + "w ar", + "ul ations", + ") (", + "Ġg od", + "Ġter rit", + "Ġpar am", + "ast s", + "8 7", + "Ġen emies", + "ĠG ames", + "F F", + "Ġacc ident", + "W ell", + "ĠMart in", + "T ER", + "Ġat h", + "ĠHe ll", + "Ġfor g", + "Ġve ter", + "ĠMed ic", + "f ree", + "Ġst ars", + "Ġexp ensive", + "Ġac ad", + "ra wn", + "ĠW he", + "Ġl ock", + "Ġform at", + "Ġsold iers", + "s m", + "Ġag ent", + "Ġrespons ibility", + "or a", + "ĠS cience", + "Ġrap id", + "Ġt ough", + "ĠJes us", + "Ġbelie ves", + "M L", + "Ġwe ar", + "le te", + "Ãĥ ÃĤ", + "ĠD ri", + "Ġcomm ission", + "ĠB ob", + "O h", + "ap ed", + "Ġwar m", + "ÃĥÃĤ ÃĥÃĤ", + "Ġ200 3", + "ort ion", + "Ġhas n", + "ust er", + "Ġun ivers", + "ĠI ll", + "Ġk ing", + "olog ies", + "9 4", + "ĠT em", + "ĠM os", + "Ġpat ient", + "ĠMex ico", + "ce an", + "ĠDe ath", + "ĠSand ers", + "y ou", + "ĠC ast", + "ĠComp any", + "pt y", + "Ġhappen ing", + "F P", + "ĠB attle", + "Ġb ought", + "A m", + "M od", + "U s", + "ut ers", + "ĠC re", + "ĠTh ose", + "Ġ4 4", + "is er", + 
"Ġs oul", + "ĠT op", + "ĠHar ry", + "ĠA w", + "Ġse at", + "ff ee", + "Ġrev olution", + "Ġ( \"", + "ĠD uring", + "et te", + "Ġr ing", + "Ġoff ensive", + "Ġreturn s", + "Ġv ideos", + "Ġdis cl", + "Ġfam ous", + "en ced", + "ĠS ign", + "ĠR iver", + "Ġ3 00", + "P M", + "ĠB us", + "ĠC H", + "Ġcandid ates", + "ard en", + "Ġpercent age", + "Ġvis ual", + "Ġthan k", + "Ġtrou ble", + "ner gy", + "Ġ200 1", + "Ġpro ve", + "ash ion", + "Ġen h", + "ĠL ong", + "U M", + "Ġconnect ed", + "Ġposs ibility", + "O ver", + "Ġexper t", + "Ġl ibrary", + "art s", + "ĠDirect or", + "Ġfell ow", + "9 2", + "ir ty", + "Ġd ry", + "Ġsign s", + "ĠL ove", + "Ġqu iet", + "f oot", + "Ġp ure", + "ĠH un", + "Ġf illed", + "ph as", + "ĠE lect", + "end ment", + "ĠEx pl", + "Ġun able", + "n s", + "m o", + "Ġv ast", + "ob e", + "Ġident ify", + "app ing", + "ĠCarol ina", + "g ress", + "Ġpro te", + "Ġf ish", + "Ġcircumst ances", + "raz y", + "ĠPh ot", + "Ġb odies", + "ĠM ur", + "Ġdevelop ing", + "ĠA R", + "Ġexperien ced", + "Ġsubst ant", + "ĠBo ard", + "es ome", + "Ġdom estic", + "Ġcomb ined", + "ĠP ut", + "Ġchem ical", + "ĠCh ild", + "Ġpo ol", + "ĠC y", + "Ġe gg", + "c ons", + "st ers", + "Ġh urt", + "Ġmark ets", + "Ġconserv ative", + "Ġsupp orters", + "Ġag encies", + "id el", + "O b", + "ur b", + "Ġ4 3", + "ĠDef ense", + "y e", + "ĠA p", + "du le", + "Ġtemper ature", + "Ġconduct ed", + "ĠCh ief", + "Ġpull ed", + "Ġf ol", + "L ast", + "ont o", + "os is", + "V ER", + "D es", + "ĠP an", + "F irst", + "Ġadv ance", + "Ġlic ense", + "r ors", + "ĠJ on", + "Ġimag ine", + "Ġhe ll", + "Ġf ixed", + "Ġinc or", + "os ite", + "ĠL og", + "ick en", + "] :", + "Ġsurpr ise", + "h ab", + "Ġc raft", + "ol t", + "ĠJ ul", + "Ġd ial", + "Ġrele vant", + "Ġent ered", + "Ġlead s", + "ĠA D", + "ĠCle an", + "Ġpict ures", + "ess or", + "Ġal t", + "Ġpay ing", + "P er", + "ĠMark et", + "Ġupd ates", + "am ily", + "ĠT ype", + "ĠH ome", + "Ġ5 5", + "semb ly", + "rom e", + "8 3", + "Ġgreat est", + "Ġhe ight", + "Ġhe av", + "ain ts", + "Ġlist en", + "as er", + "ĠS H", + "Ġcap able", + "ac le", + "Ġpers pect", + "in ating", + "Ġoff ering", + "ry pt", + "ĠDe velop", + "ab in", + "r c", + "Ġbr ight", + "al ty", + "ar row", + "Ġsupp l", + "ind ing", + "ack ed", + "gy pt", + "ĠAn other", + "p g", + "ĠVirgin ia", + "ĠL u", + "Ġpl anned", + "Ġp it", + "Ġswe et", + "T ype", + "ĠD i", + "Ġtyp ically", + "ĠFranc isco", + "Ġpro spect", + "ĠD an", + "Ġte en", + "re es", + "Ġsc hed", + "Ġh ol", + "Ġsc r", + "Ġlot s", + "l ife", + "Ġnews p", + "Ġfor get", + "ĠN one", + "ĠM iddle", + "ĠR yan", + "ed d", + "Ġse vere", + "Ġsu it", + "ll er", + "9 3", + "Ġcor respond", + "Ġexpl os", + "u ations", + "Ġfl ag", + "g ame", + "r id", + "Ġpr in", + "ĠD ata", + "Ġde ploy", + "ĠEn ter", + "su it", + "gh an", + "ĠM en", + "Ġthough ts", + "Ġmat ters", + "Ġad apt", + "ĠA ri", + "Ġf ill", + "Ġfor th", + "Ġs am", + "Ġ4 1", + "Ġpay ment", + "ĠH or", + "Ġsp ring", + "du c", + "Ġl osing", + "Ġbring ing", + "F O", + "al a", + "Ġdist ribution", + "he red", + "b our", + "ĠIsrael i", + "om a", + "Ġcomb ination", + "Ġpl enty", + "V E", + "C an", + "ĠH aw", + "Ġper man", + "ĠSpe cial", + "Ġto w", + "Ġsee king", + "Ġexam ples", + "Ġclass es", + "c r", + "Ġbe er", + "Ġmov es", + "ĠI P", + "ĠK n", + "Ġpan el", + "E ven", + "Ġproper ly", + "Ġr is", + "Ġpl ug", + "Ġestim ated", + "E very", + "Ġdef ensive", + "ag raph", + "Ġpre gn", + "Ġinst it", + "ĠV ict", + "Ġvol ume", + "Ġpos itions", + "Ġl inks", + "ĠPro gram", + "ĠWe ek", + "ag ues", + "Ġtrans form", + "k er", + "ĠC EO", + "Ġc as", + "Ġopp onent", + "Ġtwe 
et", + "ĠC ode", + "Ġsh op", + "Ġf ly", + "Ġtal ks", + "Ġb ag", + "Ph one", + "Ġa id", + "Ġpl ants", + "Ġ6 5", + "Ġatt orney", + "ar ters", + "qu est", + "ĠMag ic", + "Ġbeg ins", + "Ġmy ster", + "Ġenvironment al", + "Ġst orage", + "N N", + "Ġm arg", + "Ġs ke", + "Ġmet al", + "ell y", + "Ġord ered", + "Ġrem ained", + "Ġl oved", + "Ġprom pt", + "Ġupd ated", + "Ġexper ts", + "Ġwalk ing", + "Ġan cient", + "Ġperform ed", + "AT E", + "Ġne ither", + "i ency", + "Ġmanufact ure", + "ĠP ak", + "Ġselect ed", + "Ġm ine", + "Ġult imately", + "Ġexpl an", + "Ġlab el", + "ĠServ ices", + "ribut ed", + "Tr ump", + "Ġsy n", + "ĠU lt", + "S C", + "Ġme at", + "Ġg iant", + "ĠW ars", + "ĠO N", + "Ġad m", + "Ġinter pret", + "Ġeven ing", + "Ġev il", + "ĠB oston", + "ĠW ild", + "Ġ Ã", + "ĠBit coin", + "ĠAm azon", + "D r", + "ĠIn formation", + "Ġobvious ly", + "Ġadv anced", + "Ph oto", + "ol ar", + "Ġwe ather", + "Ġsymb ol", + "Ġso le", + "Ġpot entially", + "ost er", + "Ġorig inally", + "m un", + "3 00", + "az e", + "ess ions", + "Ġde ck", + "Ġst ood", + "Ġyou th", + "ĠB ern", + "R ep", + "ĠT est", + "Ġbas ically", + "ot ic", + "Ġinvol ve", + "ol it", + "ly n", + "S ee", + "Ġair craft", + "Ġconf irm", + "E W", + "Ġmess ages", + "ĠRich ard", + "Ġk it", + "Ġpro hib", + "Ġv ulner", + "is ters", + "Ġexist ence", + "Ġturn ing", + "ĠS P", + "Ġdes ire", + "Ġfl at", + "Ġm ent", + "se ason", + "ang es", + "Ġneighbor hood", + "ĠL ake", + "AT ION", + "Ġpoint ed", + "b ur", + "Ġinn ov", + "uc ks", + "U L", + "Ġprofess or", + "Ġexp ressed", + "A B", + "ic ious", + "Ġ200 2", + "ĠDe v", + "Ġs ession", + "Ġb are", + "s en", + "Ġdis s", + "ĠC ath", + "ĠP ass", + "ĠP oint", + "Ġdo ctor", + "or row", + "ail ed", + "ĠR ub", + "ĠD C", + "ĠChar l", + "p erson", + "Ġwrit er", + "igh ters", + "ure au", + "Ġob lig", + "Ġrecord ed", + "Ġbro ke", + "Ġord ers", + "il ty", + "Ġmot ion", + "in ity", + "l aw", + "ad ium", + "Ġimm igration", + "Ġcontr ast", + "Ġb att", + "Ġex cellent", + "Ġtechn ical", + "am i", + "Ġt un", + "Ġcl oud", + "ĠY ear", + "ge on", + "Ġcre ation", + "Ġstr ange", + "Ġa uth", + "Ġfor t", + "b orn", + "Ġext ent", + "ĠT oday", + "ĠCl ub", + "Ġr ain", + "Ġs ample", + "Ġaccept ed", + "Ġt act", + "Ġf ired", + "ĠS on", + "Ġstand s", + "Ġb oot", + "Ġ4 7", + "Ġstat ements", + "Ġvers ions", + "Ġse lling", + "ound ed", + "Ġ199 0", + "Ġwere n", + "ĠW atch", + "Ġexper iment", + "P ost", + "Ġret ail", + "ul ed", + "In st", + "un te", + "ãĥ ¼", + "Ġdep art", + "Ġb ond", + "i very", + "om pl", + "Ġre action", + "ĠSyri an", + "ĠP ac", + "app ed", + "ani el", + "D P", + "Ġres olution", + "Ġre act", + "Ġappro ved", + "on om", + "m ond", + "ĠO ffic", + "-- -", + "Ġrepl ace", + "Ġt ack", + "Ġsp ort", + "Ġch ain", + "Ġemer gency", + "r ad", + "ĠPalest in", + "Ġ4 6", + "Ġautom atically", + "Ġrout e", + "Ġp al", + "Ġb anks", + "ĠPar is", + "ĠMed ia", + "ro ad", + "ic ing", + "i xt", + "ist ed", + "Ġg rew", + "Ġco ord", + "ĠW here", + "om in", + "Ġsub s", + "� �", + "Ġ ±", + "Ġcorpor ate", + "Ġse lection", + "n oon", + "ĠRep ort", + "c s", + "clud ing", + "ord ers", + "anc he", + "ĠIt s", + "Ġslow ly", + "ĠE gypt", + "ĠA cc", + "Ġcol le", + "iqu es", + "E X", + "Ġattempt s", + "ur l", + "ĠC ross", + "Ġfind ings", + "ĠS C", + "ĠO R", + "Ġind ex", + "ens ity", + "ĠW ay", + "ĠL and", + "Ġsh ock", + "d is", + "Ġd ynam", + "Ġc art", + "m osp", + "S ince", + "i est", + "ĠB oy", + "Ġst orm", + "ĠCont in", + "201 3", + "he w", + "il it", + "Ġess ential", + "iqu id", + "O ther", + "ive red", + "Ġreason able", + "A ct", + "Ġsub sequ", + "ĠP ack", + "ĠF 
ort", + "Ġconsider ing", + "Ġun iversity", + "l og", + "Ġmar ried", + "Ġill ust", + "ĠTr ue", + "£ ı", + "Ġnumer ous", + "rast ructure", + "Ġserious ly", + "Ġrefer red", + "u a", + "Ġconsist ent", + "on na", + "ĠRe al", + "ru ption", + "ci ples", + "Ġfact s", + "9 1", + "ot es", + "er g", + "The n", + "Ġacc ompl", + "N ote", + "Ġre venue", + "Ġpass ing", + "Ġm al", + "e en", + "ĠY et", + "Ġg ather", + "ter day", + "ew ork", + "ĠA uthor", + "P e", + "Ġopt im", + "Ġr ub", + "Ġè £ı", + "Ġun known", + "st one", + "Ġun ion", + "ol ve", + "Ġopportun ities", + "Ġbrow ser", + "ĠW al", + "ĠC ost", + "Ġreport ing", + "st s", + "p et", + "Ġs and", + "Ġsudden ly", + "Ġsurpr ising", + "ĠV R", + "Ġsomew hat", + "ĠB as", + "ult ure", + "iz z", + "ĠC D", + "Ġchalleng es", + "Ġsett ings", + "Ġexperien ces", + "ĠF ull", + "Ġcan n", + "Ġrece iving", + "ES T", + "Ġj oint", + "Ġcult ural", + "Ġa st", + "8 2", + "as tern", + "ce ived", + "ĠC ru", + "Ġb ull", + "p ired", + "am m", + "Ġfac ing", + "p ower", + "Ġb oss", + "ĠH ol", + "Ġinst r", + "Ġincreasing ly", + "Ġsh ift", + "Ġstre ets", + "ĠWilliam s", + "ab b", + "Ġl ie", + "Ġl augh", + "ĠC a", + "P L", + "Ġadult s", + "Ġcustom er", + "Ġob tained", + "Ġsupport ing", + "ht ml", + "f ire", + "Ġdetail ed", + "Ġpick ed", + "ĠR ight", + "ld er", + "E E", + "st ood", + "ĠK im", + "Ġw ire", + "Ġs ight", + "Ġdevelop ers", + "Ġpers ons", + "Ġs ad", + "Ġc up", + "Ġwar ning", + "Ġboy s", + "l ong", + "Ġb ird", + "f o", + "Ġw al", + "Ġobserv ed", + "Ġz one", + "iven ess", + "Ġch annel", + "c ript", + "Ġref used", + "ĠAg ain", + "Ġsu c", + "Ġspokes man", + "ĠRe f", + "r ite", + "ou ston", + "ãĥ ³", + "ĠS her", + "Ġact s", + "ĠN ame", + "Ġstrugg le", + "ar ry", + "omet imes", + "Ġdisc rim", + "H T", + "Ġcateg ory", + "Ġreal ize", + "Ġemploy ee", + "ĠAf ghan", + "en ger", + "Ġgun s", + "ĠSte ve", + "ĠM ot", + "ĠO l", + "ok ed", + "Ġth ick", + "Ġfair ly", + "ill y", + "Ġsur ve", + "ĠM at", + "we ight", + "â Ķ", + "Ġtro ops", + "Ġag ents", + "Ġbatter y", + "Ġmot iv", + "à ¡", + "S ec", + "d en", + "o very", + "L S", + "Ġfl u", + "Ġconf ident", + "ĠO per", + "Ġem pty", + "Ġp hen", + "Ġse ctor", + "Ġexc ited", + "Ġrem ote", + "ap h", + "o en", + "Ġdestroy ed", + "Ġmor al", + "ĠH P", + "ĠR on", + "Ġd ress", + "ĠB at", + "Ġl it", + "ĠM S", + "Ġa f", + "H L", + "r um", + "is ms", + "Ġshould n", + "Ġsym pt", + "ĠTor onto", + "het ic", + "Ġcar bon", + "Ġinstall ed", + "Ġviol ent", + "Ġsol ar", + "j a", + "Ġpract ices", + "Ġr ide", + "ĠP enn", + "Ġimpro ved", + "Ġaud io", + "Ġbehav i", + "ĠP S", + "Ġe ating", + "D ata", + "ĠRe view", + "p ass", + "cl aim", + "u ated", + "ang ers", + "c hen", + "Ġproper ties", + "Ġany where", + "An other", + "Ġbl ow", + "ĠJack son", + "Ġp roud", + "Ġplan e", + "l ines", + "Ġsqu are", + "Ġpro of", + "ans as", + "Ġtalk ed", + "m akers", + "Ġs ister", + "Ġhold s", + "Ġres ident", + "Ġ= =", + "Ġresist ance", + "Ġspl it", + "Ġpro secut", + "Ġconf idence", + "res ents", + "Ġcut s", + "Ġexcept ion", + "Ġz ero", + "Get ty", + "Ġcop yright", + "Ġtot ally", + "orm al", + "ific ations", + "ĠAustral ian", + "Ġs ick", + "Ġ1 50", + "Ġhouse hold", + "Ġfe es", + "Ġdri vers", + "og en", + "ĠN Y", + "Ġnecess arily", + "Ġregul ations", + "ear ing", + "s l", + "Ġperspect ive", + "c are", + "ic ial", + "H is", + "Ġesc ape", + "Ġsurpr ised", + "ĠV an", + "ur rent", + "Ġv ac", + "8 1", + "ĠTh us", + "Ġem phas", + "ĠCh ampions", + "ĠI ce", + "Ġn arr", + "Ġhead s", + "Ġca using", + "b el", + "f ortunately", + "ĠM a", + "Ġtarg ets", + "ci pl", + "Ġafter noon", + "Ġadd s", + 
"ĠMay be", + "ĠF our", + "ess ed", + "ple te", + "Ġus ual", + "ch o", + "ing u", + "Ġwith d", + "ĠE nergy", + "ĠE conom", + "O O", + "Ġart icles", + "Ġinj ured", + "Ġman age", + "Ġexpl ains", + "Ġdi agn", + "R ec", + "at ures", + "Ġlink ed", + "Ġdiscuss ed", + "Ġexpl o", + "Ġocc asion", + "ath an", + "Ġopp osite", + "Ġfac es", + "Ġden ied", + "ĠK night", + "Ġn ut", + "Ġapprox imately", + "Ġdisapp oint", + "onym ous", + "ĠB est", + "ĠL o", + "ĠH y", + "ĠA ff", + "Ġvot ing", + "an while", + "ĠII I", + "Ġinstit utions", + "ag ram", + "ĠD aily", + "Ġdr ag", + "Ġnear by", + "Ġgu ilty", + "Ġcon ver", + "P re", + "s hip", + "Ġre ward", + "Ġphilos oph", + "ĠS S", + "u gh", + "Ġapp s", + "f riend", + "Ġu pper", + "Ġad vert", + "Ġs now", + "Ġfr ust", + "Ġour selves", + "F r", + "ĠD ie", + "amp ion", + "Ġdis miss", + "Ġc ere", + "Ġsign al", + "f rom", + "Ġ ).", + "Ġ5 2", + "Ġcr imes", + "it ors", + "est ival", + "use um", + "Ġcoun cil", + "ĠS aud", + "M ay", + "ĠG un", + "ic ian", + "et her", + "Ġsu fficient", + "ĠH en", + "so le", + "Ġhistor ical", + "ĠF ar", + "ĠT urn", + "Ġp in", + "Ġsuc ceed", + "m at", + "ly mp", + "Ġtrad ition", + "ĠO k", + "Ġc ro", + "Ġdesc ription", + "al le", + "Ġsk y", + "T e", + "Ġwide ly", + "Ġw ave", + "Ġdefin ition", + "ĠJew s", + "Ġcy cle", + "Ġref ere", + "Ġbr ings", + "us al", + "Ġal ive", + "Ġfrequ ently", + "Ġint ention", + "ĠCont rol", + "l v", + "y stem", + "Ġpriv acy", + "g ent", + "ren ce", + "ĠQu est", + "ĠChrist mas", + "Ġr ail", + "Ġco oper", + "Ġtest ed", + "ĠC apt", + "as ks", + "Ġcomfort able", + "Ġdel ivered", + "sc ape", + "Ġdep th", + "ĠG OP", + "Ġwrit es", + "Ġass ets", + "Ġsa v", + "im ents", + "Ġtrans ition", + "Ġart ist", + "ĠL ook", + "Ġl ob", + "Ġcomp onents", + "ar ity", + "Ġwalk ed", + "Ġro ot", + "Ġparticip ants", + "Ġnot iced", + "Ġres c", + "Ġn av", + "ĠAd minist", + "d a", + "ut ral", + "pl ate", + "Ġimport ance", + "Ġass ert", + "ious ly", + "c ription", + "Ġinj uries", + "ĠChe ck", + "Ġregist ered", + "Ġint ent", + "Ġmiss ed", + "ograph ic", + "Ġsent ence", + "oun ter", + "Ġassist ance", + "ev in", + "Ġdat abase", + "Ġbuild ings", + "Ġclass ic", + "Ġth inks", + "ĠOh io", + "P r", + "ug g", + "Ġfe e", + "p an", + "Ġeffect ively", + "Ġfac ility", + "Ġbe ar", + "Ġch apter", + "Ġdog s", + "ĠCol umb", + "Ġl atter", + "it ial", + "Ġad mitted", + "T V", + "ĠGe org", + "Ġpost s", + "\\ \\", + "Ġlawy er", + "Ġequ ival", + "Ġm and", + "Ġcontro lled", + "ĠW alk", + "ĠAnd rew", + "Ġmen u", + "am ental", + "Ġprotect ed", + "v a", + "Ġadminist r", + "or al", + "Ġre in", + "ĠS ar", + "Ġamount s", + "Ġn ative", + "ĠM oon", + "Ġrep resents", + "Ġab andon", + "Ġcarry ing", + "Ġt ank", + "m ary", + "Ġdecl ared", + "T ube", + "Ġh at", + "Ġpun ish", + "el lect", + "m es", + "Ġun iverse", + "ĠR od", + "ph y", + "Ġinf rastructure", + "Ġ5 1", + "Ġopp osed", + "ow nt", + "c a", + "ĠM ake", + "Ġhard ware", + "Ġco ffee", + "R el", + "b al", + "w orld", + "ĠS af", + "ĠSe a", + "in als", + "Ġown ed", + "Ġh all", + "ers ion", + "Ġdescrib e", + "ĠP ot", + "Ġport ion", + "Ġat mosp", + "Ġgovern ments", + "Ġdep ending", + "Ġoff ense", + "Ġtr ick", + "aw a", + "ĠL ine", + "ĠV is", + "ĠH ard", + "ĠOr ig", + "ĠCl ick", + "Ġdes k", + "ĠVal ley", + "ĠS ov", + "Ġmov ies", + "Ġrem ark", + "Ġm ail", + "Ġcons cious", + "Ġrul ing", + "ĠR ights", + "Ġmed ic", + "he nt", + "ĠW omen", + "> <", + "Ġrepl aced", + "ĠP rem", + "ĠTh anks", + "Ġre new", + "ĠB all", + "if orm", + "Ġsh ots", + "C omm", + "Ġar med", + "Ġconst ant", + "Ġt aste", + "Ġreal ized", + "Ġbu ff", + "Ġm o", + "Ġeffic 
ient", + "M ost", + "or ation", + "if ies", + "Ġcommun ication", + "Ġfl ood", + "Ġconsequ ences", + "Ġany way", + "ig g", + "ĠG M", + "ĠTh ank", + "Ġ iron", + "Ġev olution", + "ĠC op", + "tw itter", + "Ġ9 5", + "Ġrelationship s", + "ad el", + "ĠYou ng", + "Ġpropos al", + "ay ers", + "uild ing", + "ĠH ot", + "OR E", + "c os", + "Ġcoll abor", + "P G", + "ax y", + "Ġknow ing", + "Ġsupport s", + "ow ed", + "Ġcontrol s", + "Ġmere ly", + "um er", + "Ġath let", + "Ġf ashion", + "p ath", + "Ġg ift", + "Ġer a", + "AN D", + "Ġkind s", + "ĠKore an", + "Ġleg it", + "ul ous", + "Ġess entially", + "Ġthe rap", + "n ic", + "Ġsuff ered", + "Ġh ur", + "Ġprom ise", + "Ġex cess", + "Ġover w", + "Ġpr ime", + "ĠH ouston", + "er ry", + "ĠM s", + "R S", + "201 2", + "Ġst ores", + "ĠO lymp", + "Ġj ourney", + "Al though", + "S ub", + "ĠE duc", + "ĠCh apter", + "Ġrequest s", + "Ġconsum ers", + "Ġt iny", + "Ġis ol", + "ĠF air", + "b a", + "ĠY OU", + "Ġcr ash", + "ce ler", + "Ġemot ional", + "Ġgood s", + "Ġelect ed", + "Ġmod er", + "ĠLin ux", + "Ġbl ocks", + "Ġis land", + "ĠSoc iety", + "Ġelect ions", + "Ġbroad cast", + "Ġche ap", + "Ġn ations", + "Ġse asons", + "4 00", + "Ġwas te", + "ĠS at", + "Ġfield s", + "em ploy", + "Ġprof ile", + "Ġauth ors", + "AL L", + "ĠG ra", + "w est", + "ĠT y", + "Ġdeath s", + "Ġv acc", + "Ġfor med", + "Ġd u", + "Ġon going", + "ĠMuslim s", + "el f", + "ig ure", + "Ġass ume", + "ĠUkrain e", + "w ater", + "Ġco ast", + "Ġvot ed", + "g or", + "ĠA S", + "ĠMich igan", + "az a", + "ĠAr m", + "i ro", + "Ġf lex", + "as ters", + "' '", + "Ġwel come", + "ar l", + "Ġloc ations", + "ig ation", + "ĠF il", + "Ġbu ying", + "Ġarch itect", + "Ġhard er", + "ĠC ub", + "Ġinter face", + "Ġrestaur ant", + "Ġdisco ver", + "Ġex ceed", + "Ġfav our", + "ger y", + "Ġd uty", + "Ġp itch", + "ad or", + "ĠM ach", + "b oy", + "Ġrespond ed", + "Ġext ended", + "her s", + "M any", + "ra id", + "if er", + "ĠIn s", + "S er", + "Ġmed ium", + "s he", + "ĠS ports", + "Ġmag azine", + "ut ation", + "Ġlim its", + "ĠG all", + "Ġex ternal", + "raz il", + "Ġyoung er", + "t le", + "Ġrem ind", + "ĠC ON", + "Ġimmedi ate", + "Ġh idden", + "Ġvol unte", + "Ġsim pl", + "od cast", + "Ġph ase", + "d r", + "Ġpl ot", + "Ġexp osure", + "R I", + "og rap", + "v in", + "an ish", + "ĠAc ad", + "ĠEng ine", + "Ġexp ansion", + "ĠP ay", + "Y our", + "Ġpus hed", + "ĠE ll", + "ĠHe ad", + "Ġmarket ing", + "ĠA C", + "k et", + "Ġh its", + "Ġg ro", + "ĠA ge", + "ĠSc ot", + "] [", + "Ġst im", + "Ġi Phone", + "Ī Ĵ", + "Ġn arrow", + "ĠGet ty", + "ĠTur key", + "Ġperfect ly", + "Ġen able", + "ut ch", + "Ġprec ise", + "Ġreg ime", + "Ġsh if", + "Ġcomp ens", + "g un", + "d iv", + "Ġch osen", + "ĠK en", + "An y", + "Ġtre es", + "Ġrecomm ended", + "ĠR en", + "u able", + "ĠH T", + "F ollow", + "E G", + "ĠH and", + "ĠK enn", + "Ġarg uments", + "Ġex ists", + "Ġb ike", + "ĠCons erv", + "Ġbre aking", + "ĠG ar", + "Ġc razy", + "Ġvirt ual", + "ay lor", + "ix el", + "Ġ19 80", + "Ġper mission", + "ĠSer ies", + "Ġconsum er", + "Ġclose ly", + "c alled", + "Ġ5 4", + "Ġhop es", + "Ġar ray", + "ĠW in", + "ĠLab our", + "Ġsp ons", + "ĠI re", + "Ġp ow", + "Ġread ers", + "Ġemploy ment", + "Ġcreat ure", + "Ġresult ing", + "Ġaccur ate", + "Ġmom ents", + "Ġarg ued", + "Ġp ed", + "D uring", + "Ġ5 3", + "ĠT al", + "Ġs ought", + "Ġsuff ering", + "Ġ icon", + "le e", + "Ġ( $", + "al ian", + " °", + "Ġp ra", + "Ġbon us", + "( \"", + "k o", + "Ġact ing", + "D E", + "f all", + "Ġcompar ison", + "Ġsm ooth", + "ĠN AS", + "u pp", + "ĠJose ph", + "ep ing", + "ĠT ake", + "ĠM id", + "Ġs ending", + "f 
ast", + "ĠF all", + "Ġdeal ing", + "us er", + "ĠOr gan", + "C o", + "Ġatt ached", + "Ġse es", + "% .", + "Ġtyp ical", + "AR T", + "Ġfind s", + "ĠAs ia", + "um in", + "ĠC ore", + "ĠE nt", + "in ent", + "u ce", + "ĠBl ood", + "ĠN ever", + "Ġem ails", + "Ġhigh light", + "Ġconf ront", + "at us", + "ut ed", + "Ġun us", + "Ġtop ic", + "ĠAd am", + "Ġb le", + "at i", + "Ġunder stood", + "S et", + "st ruct", + "T P", + "Ġm ob", + "a a", + "ĠSt art", + "pect ed", + "se ll", + "Ġded icated", + "ĠC A", + "u an", + "Ġsong s", + "esc ription", + "Ġte ch", + "Ġr ape", + "Ġas ide", + "Ġgr ant", + "Ġ5 6", + "s ub", + "Ġarg ue", + "Ġcont aining", + "Ġsche dule", + "Ġliber al", + "Ġpublic ly", + "Ġheav ily", + "ĠU t", + "in er", + "ĠS ection", + "ĠC are", + "we et", + "l s", + "D is", + "âĶ Ģ", + "ĠF ollow", + "B ack", + "ĠI T", + "Ġb es", + "j i", + "ĠH it", + "est ed", + "Ġevery body", + "ĠSw ed", + "Ġfem in", + "Ġfac ilities", + "Ġcon ven", + "C omp", + "ĠO S", + "c ore", + "Ġan x", + "Ġdiv ision", + "ĠC am", + "ĠSt an", + "m ates", + "Ġexpl ore", + "pl om", + "Ġsh ares", + "pl oad", + "an es", + "Ġide al", + "et ers", + "ĠB ase", + "Ġpl astic", + "Ġdist inct", + "ĠNet work", + "ĠSe attle", + "Ġtrad ing", + "ens us", + "int end", + "Ġex hib", + "Ġinit ially", + "ĠF ood", + "Ġthous and", + "ĠBus iness", + "act er", + "Ġpar agraph", + "Ġrough ly", + "Ġw ww", + "Ġcreat ive", + "ĠCon f", + "Ġconsum ption", + "Ġfil ms", + "ag an", + "Ġob tain", + "Ġt all", + "Ġt or", + "Ġacknow led", + "Ġg rown", + "al o", + "K E", + "Ġ4 00", + "end ers", + "t aining", + "U G", + "Ġsu icide", + "Ġwat ched", + "ĠL ist", + "al i", + "re hens", + "Ġsurround ing", + "Ġp ip", + "Ġf lying", + "ĠJ ava", + "ord an", + "Ġserv ing", + "in ations", + "p ost", + "Ġsh o", + "A v", + "Ġj ail", + "z y", + "Ġ199 9", + "Ġ< /", + "Ġliter ally", + "ĠS ir", + "Ġexp osed", + "Ġl ies", + "st ar", + "Ġb at", + "Ġear ned", + "ĠD ig", + "Ġspec ified", + "ĠSe ason", + "Ġdeg rees", + "Don ald", + "Ġcent re", + "Ġsh aring", + "Ġwin ter", + "ĠC O", + "C he", + "Ġ Î", + "M P", + "Ġun w", + "Ġfew er", + "ĠM ir", + "Ġsomew here", + "ĠK ey", + "Ġattack ed", + "ĠK ir", + "Ġdom ain", + "Ġstrong er", + "Ġ9 9", + "Ġpen alty", + "I d", + "Sc ript", + "Ġdecl ined", + "Ġne ck", + "Ġfra ud", + "Ġcur rency", + "Ġr ising", + "R C", + "âĢ¦ âĢ¦", + "H z", + "Ġt ab", + "Ġtal ent", + "n am", + "ĠN BA", + "Ġvill age", + "Ġleg s", + "ĠN ext", + "E d", + "Ġac id", + "Ġhy d", + "8 00", + "Ġinvol ving", + "ĠIm age", + "ĠBe fore", + "F l", + "Ġyes terday", + "S ource", + "Ġterror ist", + "Ġsu p", + "Ġsy nt", + "ĠSaud i", + "Ġw est", + "Ġr u", + "b urg", + "Ġvis ible", + "Ġstru ck", + "r ison", + "Ġaw esome", + "Ġd rawn", + "Ġansw ers", + "ĠG irl", + "ĠR am", + "Ġthreat s", + "Ġdef eat", + "os it", + "Ġv ent", + "atur ally", + "Americ an", + "end a", + "ĠH oly", + "Ġr um", + "% ,", + "c ase", + "ĠHist ory", + "ĠYou Tube", + "Ġsit uations", + "ĠD NA", + "S te", + "Ġsa ved", + "It em", + "Ġrec ip", + "olog ist", + "Ġfac ed", + "Ġel ig", + "O nce", + "ĠL i", + "u h", + "Ġmist ake", + "ĠDiv ision", + "ĠB ell", + "Ġsympt oms", + " ®", + "Ġdom in", + "Ġfall ing", + "Ġend ing", + "as hes", + "Ġmat ches", + "ĠOn line", + "Ġexplan ation", + "D ef", + "red it", + "Ġany more", + "ĠT otal", + "ĠF OR", + "us hed", + "Ġlet ters", + "Ġris ks", + "ĠO K", + "Ġreported ly", + ": \\", + "Ġpl ate", + "Ġsubject s", + "Ġattempt ed", + "if ier", + "ian a", + "Ġunlike ly", + "ĠTh ough", + "um a", + "ĠIn vest", + "ĠPr in", + "ic an", + "ĠD ar", + "ĠColor ado", + "au g", + "Ġve get", + "a os", + "ri a", + 
"Ġshe l", + "Ġmark ed", + "Ġ( )", + "Ġsp r", + "p o", + "ĠL ink", + "Ġdef e", + "ĠJ r", + "Ġthem e", + "Ġpass ion", + "ĠP en", + "Ġinf o", + "iz er", + "Ġsh it", + "ĠC ivil", + "ap se", + "c re", + "Ġpo ly", + "Ġcomp onent", + "ĠChar les", + "ĠIre land", + "ĠPro v", + "Ġdo ctors", + "Ġgr anted", + "Ġpain t", + "Ġhon or", + "Ġsm oke", + "Ġpay ments", + "Ġprim arily", + "ĠKing dom", + "r ich", + "ate ll", + "Ġde als", + "Ġsched uled", + "Ġfund amental", + "Ġprote in", + "Ġnewsp aper", + "Ġcl ients", + "yth on", + "ĠD ate", + "h us", + "Ġfeed back", + "Ġstret ch", + "Ġc ock", + "Ġhot el", + "ĠQue en", + "Ġsu gar", + "Ġj u", + "Ġmil k", + "Ġappro val", + "ĠL ive", + "Ġequival ent", + "ef ully", + "Ġins ert", + "z ona", + "Ġext ension", + "d ri", + "J ohn", + "Ġacc omp", + "S m", + "ĠF und", + "Ġconst antly", + "Ġ` `", + "Ġgener ated", + "ĠA ction", + "ĠP sych", + "ĠT ri", + "Ġrecogn ize", + "Ġv ary", + "ph a", + "ĠR a", + "d f", + "et ch", + "ĠSov iet", + "Tw o", + "Ġpattern s", + "Ġprof ession", + "an ing", + "T ime", + "ĠL im", + "Ġcol ors", + "ĠA z", + "ĠT R", + "Ġinf ect", + "Ġphen omen", + "Ġshe ll", + "Al so", + "Ġput s", + "Ġdel ivery", + "Ġbro wn", + "Ġprocess ing", + "Ġlight s", + "ess age", + "ĠBro ok", + "ĠA ud", + "l ation", + "Ġindust rial", + "L ike", + "ĠB razil", + "rou s", + "ES S", + "ĠL uc", + "Ġsome how", + "Ġ8 5", + "Ġpro port", + "Ġpolit icians", + "Ġindic ate", + "Ġh ole", + "Ġtechn iques", + "Ġcompet itive", + "Ġph r", + "Ġv o", + "ist ent", + "ĠD ream", + "Ġcamp us", + "Ġaspect s", + "Ġhelp ful", + "Ġsh ield", + "or se", + "Ġtrig ger", + "m al", + "Ġ5 8", + "Ġt ort", + "Ġperson ally", + "Ġt ag", + "Ġkeep s", + "ĠV ideo", + "Ġben ch", + "Ġg ap", + "a ire", + "Ġe ast", + "Ġrec overy", + "per ial", + "Ġprof it", + "ĠM ic", + "Ġ5 7", + "Ġcol on", + "Ġstrong ly", + "st yle", + "Ġalleg ations", + "h an", + "Ġrep orters", + "j o", + "r ine", + "arg et", + "and al", + "Ġ0 3", + "Ġfl ash", + "tr ans", + "Ġstr ict", + "Ġpark ing", + "ĠPak istan", + "Ġl i", + "Ġwe ird", + "ĠE ric", + "Ġreg ions", + "ĠJ un", + "Ġint ellect", + "ĠW H", + "od ing", + "rib utes", + "up id", + "ĠT it", + "Ġf inger", + "or ia", + "Ġe lev", + "ĠF ield", + "Ġcon clusion", + "; ;", + "Ġfeel ings", + "Ġext ensive", + "Ġm ixed", + "Ġne uro", + "v y", + "Ġhar ass", + "ĠC irc", + "ou ch", + "Ġterrit ory", + "Ġsuccess fully", + "M ar", + "Ġing red", + "Ġoverw hel", + "Ġl ayer", + "V iew", + "Ġall ies", + "ill ance", + "ĠTh ree", + "Ġb unch", + "Ġnorm ally", + "Ġnet works", + "Ġsac r", + "ĠC IA", + "b les", + "Ġch ose", + "Ġopp onents", + "Ġregard less", + "Ġfr anch", + "Ġpre f", + "ĠP o", + "Ġbr idge", + "ann a", + "ĠSil ver", + "Ġw age", + "p age", + "ri or", + "Ġrad ical", + "ĠL ittle", + "Ġman ip", + "Ġsecret ary", + "Ġg ang", + "D R", + "F A", + "Ġdec ent", + "ĠSp irit", + "Ġun cle", + "ĠDevelop ment", + "Ġinvest ors", + "Ġwall s", + "Ġpub lish", + "Ġgener ate", + "iss ions", + "c ar", + "Ġprom ote", + "Ġcut ting", + "Ġche st", + "Ġdrink ing", + "Ġcollect ed", + "Ġ7 2", + "Ġhop ing", + "Ġem br", + "gor ith", + "Ġwar ned", + "Ġinstruct ions", + "O G", + "ĠD id", + "ĠAg ency", + "Ġg ear", + "Ġcritic ism", + "ĠF urther", + "Ġut il", + "ann y", + "R ed", + "Ġcoun sel", + "ĠAs ian", + "Ġredu ction", + "p ool", + "Ġteach ing", + "Ġdeep ly", + "i y", + "Ġestim ates", + "Ġcho ices", + "Ġperman ent", + "in em", + "ke l", + "Ġf asc", + "p se", + "f ile", + "ĠL ow", + "ĠP erson", + "Ġt ournament", + "st al", + "Ġm el", + "U ST", + "ĠR ay", + "az i", + "V al", + "Ġcont ained", + "ĠH olly", + "Ġw ake", + "Ġreve al", + 
"Ġprocess es", + "ĠIS IS", + "Ġ0 9", + "Ġbl ind", + "Ġste el", + "ĠB ad", + "Ġcare fully", + "app y", + "ro it", + "Ġg aming", + "Ġhous es", + "ĠC oll", + "Ġtr uck", + "er m", + "Ġsc ored", + "Ġocc as", + "ret urn", + "b ound", + "v ar", + "Ġsh arp", + "Ġaf raid", + "ĠE X", + "am ber", + "c ific", + "Ġsche me", + "N C", + "ĠPol it", + "Ġdecl ine", + "Ġ199 8", + "Ġpus hing", + "Ġposs ession", + "Ġpriv ile", + "Ġteacher s", + "Ġy ield", + "H A", + "ĠDav is", + "it led", + "#### ####", + "Ġr ig", + "ĠD aniel", + "ac on", + "Ġh ide", + "ut en", + "Ġcolle agues", + "Ġprin ciples", + "Ġl oud", + "Ġs in", + "ĠDem on", + "Ġst one", + "Ġ0 2", + "Ġt aught", + "Ġter rible", + "Ġst uck", + "ĠPol icy", + "te en", + "Ġimplement ation", + "ĠB BC", + "ĠAP I", + "Ġwhe el", + "all as", + "Ġch ampions", + "ol ars", + "play er", + "Ġrepeated ly", + "ĠSt ill", + "Ġlik es", + "ast y", + "es ter", + "ĠCath olic", + "R L", + "Ġb ath", + "Ġno ise", + "t itle", + "Ġn orthern", + "P art", + "Ġmag n", + "Ġf ab", + "ĠAs h", + "Ġdis pl", + "Ġtick et", + "Ġm urd", + "Ġalong side", + "ĠMus ic", + "Ġr iver", + "ĠSte el", + "ĠC L", + "ĠPl ayer", + "ĠM ult", + "ow ing", + "re p", + "s ize", + "Ġt ur", + "ĠGeorg ia", + "isc al", + "ra ction", + "Ġc able", + "Ġ5 9", + "Ġw ins", + "Ġup coming", + "Ġsurv ive", + "Ġins pired", + "ĠEduc ation", + "Ġstat istics", + "ĠF oot", + "iam i", + "Ġy ellow", + "ĠP age", + ". -", + "ĠH as", + "Ġur ban", + "Ġa x", + "es sel", + "\\ \"", + "Ġquarter back", + "Ġreg ister", + "ĠLab or", + "Ġab ilities", + "ĠF amily", + "Ġvar iable", + "ĠPr ice", + "Ġcont em", + "Ġth in", + "ĠE qu", + "d ata", + "Ġg otten", + "Ġconst it", + "Ġas ks", + "Ġt ail", + "Ġexc iting", + "ĠE ffect", + "ĠSp anish", + "Ġencour age", + "ins on", + "ĠA h", + "Ġcommit ment", + "C S", + "Ġr ally", + "Ġ: :", + "Ġsubs id", + "Ġsp in", + "Ġcapt ured", + "201 8", + "Ġinn oc", + "Ġalleged ly", + "ĠC ome", + "Ġart ists", + "ĠN umber", + "Ġelect ronic", + "Ġreg ional", + "ap es", + "Ġw ra", + "Ġmy th", + "pr ise", + "ĠM iller", + "ĠC reat", + "ĠEp isode", + "b ell", + "Ġdirect ed", + "Ġext ract", + "Ġs orry", + "Ġv ice", + "ag ger", + "ĠSu pport", + "Ġ6 6", + "ĠI ron", + "Ġwonder ful", + "Ġg ra", + "N et", + "ion e", + "E ng", + "Ġsh ips", + "ik es", + "ĠK evin", + "it ar", + "Ġactiv ists", + "tr ue", + "ĠAri zona", + "ent h", + "ĠDes pite", + "ĠS E", + "Ġha bit", + "ern el", + "Ġin qu", + "Ġab ortion", + "Ġv oid", + "Ġexpl icit", + "Ġeng aged", + "Ġang ry", + "Ġr ating", + "Ġfr ag", + "b ro", + "ick ing", + "d ev", + "Ġwor ried", + "Ġob ser", + "Ġap artment", + "ĠG T", + "Ġest ate", + "ĠConst itution", + "em on", + "ĠS now", + "Ġcount y", + "Ġdis ag", + "ĠStep hen", + "Ġimm igrants", + "w ind", + "ĠN ations", + "Ġfol ks", + "O ut", + "Ġg all", + "Ġtarget ed", + "Ġst ead", + "ĠB on", + "ĠL ib", + "Ġinform ed", + "Ġ12 0", + "ch ain", + "idel ines", + "or ough", + "Ġdri ven", + "Ġregular ly", + "Ġbas ket", + "Ġprinc iple", + "oc ument", + "Ġst un", + "ib ilities", + "ĠRom an", + "ĠAb out", + "Ġal ert", + "Ġdemocr acy", + "Ġrepresent ed", + "H S", + "c ers", + "p arent", + "Ar t", + "p ack", + "Ġdi plom", + "re ts", + "ĠN O", + "Ġcapt ure", + "ĠAd v", + "Ħ ¢", + "Ġannounce ment", + "ĠL ear", + "Ġh ook", + "Ġpur s", + "ĠS uch", + "ĠC amer", + "Ġrefuge es", + "ĠV e", + "P ol", + "Ġrecogn ized", + "l ib", + "Ġhad n", + "A ss", + "Ġpil ot", + "us hing", + "Ġreturn ing", + "Ġtra il", + "ĠSt one", + "Ġrout ine", + "Ġcour ts", + "Ġdes per", + "Ġfriend ly", + "ĠIt aly", + "Ġpl ed", + "Ġbreat h", + "Ġstud io", + "N S", + "Ġimp ressive", + 
"ĠAfghan istan", + "Ġf ing", + "Ġd ownt", + "ink ing", + "ĠR og", + "i ary", + "col or", + "se x", + "ar on", + "Ġf ault", + "ĠN ick", + "D own", + "ĠR ose", + "ĠS outhern", + "X X", + "is odes", + "L ist", + "6 00", + "Ġout come", + "er r", + "Ġelse where", + "Ġret ire", + "Ġp ounds", + "ĠGl obal", + "Pe ople", + "Ġcommun ications", + "Ġlo an", + "Ġrat io", + "ĠEm pire", + "Ġg onna", + "Ġinv ent", + "D F", + "Ġ19 70", + "ĠComm on", + "p at", + "Ġprom ised", + "Ġd inner", + "ĠH om", + "Ġcreat es", + "Ġoper ate", + "ver ty", + "ĠJ ordan", + "et ime", + "Ġsust ain", + "R eg", + "Ġincred ible", + "im a", + "Ġwar rant", + "Ġm m", + "A tt", + "Ġlaw suit", + "Ġreview s", + "it ure", + "ĠS ource", + "l ights", + "ĠF ord", + "Ġ6 3", + "g roup", + "st ore", + "Ġfeat ured", + "Ġfore ver", + "Ġpo verty", + "ĠP op", + "ĠC NN", + "az z", + "ab is", + "ach ing", + "Ġl aid", + "ĠSu pp", + "Ġfil ter", + "en a", + "ĠCommun ity", + "Ġcreat ures", + "u ction", + "ĠR oyal", + "Ġassoci ation", + "ĠCon nect", + "ĠBr ad", + "âĸ Ī", + "l ers", + "the re", + "ĠG i", + "Ġval uable", + "AC K", + "ĠT aylor", + "Ġl iquid", + "ĠAtt orney", + "ĠCar l", + "ĠF inal", + "ag a", + "ĠWil son", + "B ecause", + "ĠProf essor", + "ak a", + "Ġincred ibly", + "r ance", + "! )", + "R ef", + "s k", + "Ġsol utions", + "Ġatmosp here", + "Ġbl ame", + "um es", + "ĠN ob", + "C A", + "um ps", + "r ical", + "ĠPut in", + "ĠD est", + "or ic", + "ĠP A", + "Ġrespect ively", + "w an", + "Ġfif th", + "â Ħ¢", + "ĠC ry", + "Ġgovern or", + "res ident", + "Ġpurch ased", + "Ġh ack", + "Ġint ense", + "ob s", + "Ġorig in", + "Ġdef ine", + "Ġcare ful", + "** *", + "Ġshould er", + "Cl ick", + "Ġt ied", + "Ġdest ruction", + "ou red", + "Ġno body", + "Ġh o", + "ĠEx per", + "Ġt ip", + "\" ;", + "Ġtechn ique", + "Ġj ur", + "ĠP ok", + "b ow", + "Ġleg end", + "Ġacc ord", + "Ġbus y", + "ĠInt el", + "Ġh ang", + "ak i", + ". 
]", + "âĢĶâĢĶ âĢĶâĢĶ", + "Ġsur gery", + "Ġrep rodu", + "Ġun iform", + "Ġscen es", + "c ode", + "Ġ6 2", + "l isher", + "ĠH ave", + "ph ia", + "Ġcry pt", + "Ġrec on", + "Ġsc ream", + "Ġadop ted", + "Ġsc ores", + "N e", + "ĠIt alian", + "in cluding", + "B O", + "Ġindic ated", + "Ġent ertain", + "G u", + "T ext", + "i el", + "Ġtw enty", + "Ġeng age", + "off s", + "ĠPac ific", + "Ġsm ile", + "Ġperson nel", + "Ġto ler", + "Ġdo ors", + "Ġt one", + "Ġmach ines", + "Ġent ering", + "ten ance", + "C O", + "ĠJer sey", + "Ġfore st", + "Ġhor se", + "Ġcompl aint", + "ĠSpr ing", + "y o", + "ĠPl us", + "ed ing", + "ĠRet urn", + "qu arters", + "ial s", + "c ow", + "Ġacad emic", + "Ġf ruit", + "Ġ199 6", + "og ether", + "Ġw ine", + "Ġpur su", + "ĠSte ven", + "Ġlic ens", + "Wh o", + "Ġclot hes", + "re ction", + "Ġsqu ad", + "Ġst able", + "Ġr aw", + "z ens", + "St ar", + "ut ies", + "anc er", + "Ġke ys", + "ĠM u", + "Ġcompl icated", + "ig er", + "ĠTe xt", + "Ġabs or", + "Ġ6 8", + "Ġfun ny", + "Ġrel ief", + "ĠL ew", + "ĠC ook", + "Ġch art", + "Ġdraw ing", + "G E", + "Ġmod ule", + "ĠB ull", + "I LL", + "Ġs alt", + "0000 0000", + "il le", + "Ġres ource", + "aw ay", + "adel phia", + "ĠB ru", + "Ġ6 7", + "Ġsome body", + "Ġparticip ate", + "Ġro se", + "we red", + "Ġmus cle", + "Ġcons ent", + "Ġcontin uing", + "ĠGuard ian", + "ĠOr der", + "reg on", + "Ġre ar", + "Ġprov ision", + "Ġlik ed", + "ri ent", + "Ġb ra", + "Tr ans", + "Ġmeet ings", + "Ġto x", + "Ġcon vent", + "Ġaut o", + "Ġrec ording", + "ĠSo ft", + "00 1", + "ĠR oll", + "Ġprogram ming", + "Ġp ic", + "Ġprov ed", + "Ġst ab", + "ĠA st", + "Ġca ption", + "ul ating", + "ĠAtt ack", + "Ġnew ly", + "Ġ199 7", + "f r", + "Ġdis cipl", + "ĠGree k", + "Ġed ition", + "ĠDo es", + "ĠB ox", + "if le", + "ack et", + "Ġpass es", + "Ġgu est", + "Ġac celer", + "it als", + "U D", + "Ġaut hent", + "ĠR est", + "ov al", + "t a", + "u ine", + "Ġarm or", + "ĠT own", + "Ġcomp at", + "Ġinc hes", + "Des pite", + "Ġass ign", + "he rent", + "Ġprep are", + "ĠM eg", + "oc key", + "Ġdep ends", + "Ġtrack s", + "w atch", + "Ġl ists", + "ĠN orthern", + "Ġal ter", + "re c", + "ĠE astern", + "Ġcond em", + "Ġevery where", + "? 
'", + "Ġaff ili", + "Ġf ought", + "\": {\"", + "Ġm ac", + "it arian", + "Ġsc ope", + "ĠA L", + "aw s", + "ar ms", + "Ġqu e", + "Ġenjoy ed", + "nes ota", + "Ġagg ressive", + "ĠSt ory", + "ĠI V", + "Ġrec ipe", + "Ġrare ly", + "ĠMed ical", + "val ue", + "ang el", + "ay ing", + "omet hing", + "Ġsub section", + "Ġs outhern", + "Ġfrequ ency", + "re te", + "roll ed", + "ult s", + "ĠN ic", + "Ġbeh alf", + "Ġsequ ence", + "ab et", + "Ġcontrovers ial", + "Ġcomp rom", + "Ġwork er", + "Ġmain ly", + "Ġal gorith", + "ĠM ajor", + "or ce", + "g ender", + "Ġorgan ized", + "Ġf ake", + "Ġconclud ed", + "ĠE D", + "ĠEx ec", + "r age", + "Ġch ances", + "ber ry", + "ĠTr ad", + "Ġconfig uration", + "Ġwithd raw", + "Ġf ro", + "ud es", + "ĠBro ther", + "ĠB rian", + "Ġtri es", + "Ġsam ples", + "Ġb id", + "ĠGold en", + "Ġphot ograph", + "if est", + "ĠD O", + "ĠPar liament", + "******** ********", + "R em", + "Ġcont est", + "Ġsign ing", + "p x", + "ĠZ eal", + "âĶĢ âĶĢ", + "E ar", + "Ġex it", + "Be fore", + "ĠCor por", + "n ull", + "mon th", + "Ġrac ial", + "ott ed", + "ĠV eg", + "ĠRe uters", + "Ġsw ord", + "ps on", + "ĠRom ney", + "a ed", + "Ġt rib", + "Ġin ner", + "Ġprot ocol", + "ĠB i", + "ĠM iami", + "ever al", + "p ress", + "Ġsh ipping", + "ĠAm endment", + "ĠHow ard", + "con nect", + "ĠD isc", + "ĠJ ac", + "iam ond", + "ĠThere fore", + "s es", + "ĠPrin cess", + "ĠUS B", + "ĠAn th", + "Ġsurve illance", + "Ġap olog", + "Ġ6 1", + "ow a", + "Ġf ulf", + "j s", + "Ġl uck", + "ust ed", + "Ġ §", + "n i", + "Ġant icip", + "em an", + "Ġwin ner", + "Ġsil ver", + "ll a", + "ic ity", + "Ġunus ual", + "Ġcr ack", + "Ġt ies", + "e z", + "Ġpract ical", + "Ġprov ince", + "ĠPl ace", + "Ġprior ity", + "IC E", + "Ġdescrib es", + "Ġbr anch", + "F orm", + "ask a", + "miss ions", + "b i", + "Ġp orn", + "ĠTur k", + "Ġent hus", + "Ġf ighters", + "Ġ0 8", + "ĠDet roit", + "Ġfound ation", + "av id", + "A re", + "Ġjud gment", + "cl ing", + "Ġsol ve", + "ĠDes ign", + "W here", + "hes is", + "ĠT ro", + "a fter", + "Ġne utral", + "ĠPalestin ian", + "ĠHolly wood", + "Ġadv is", + "ĠN on", + "y es", + "ol is", + "Ġrep utation", + "Ġsm ell", + "Ġb read", + "ĠB ul", + "ĠBe ach", + "Ġclaim ing", + "Ġgen etic", + "Ġtechn ologies", + "Ġupgr ade", + "row s", + "Ġdevelop er", + "ĠJ osh", + "ĠDis ney", + "erv ed", + "ip al", + "Ġun ex", + "Ġbare ly", + "t hen", + "ĠP ub", + "Ġill ness", + "et ary", + "ĠB al", + "Ġp atch", + "Ġbut t", + "Ġst upid", + "ĠD og", + "ĠD allas", + "f ront", + "ie ce", + "Ġprot ests", + "Ġch at", + "oen ix", + "Ġw ing", + "Ġpar liament", + "Ġ7 7", + "ose xual", + "Ġre nder", + "pt ions", + "ĠCo ast", + "os a", + "ĠG reg", + "h op", + "ĠMan agement", + "Ġbit coin", + "Ġrec over", + "Ġincor por", + "or ne", + "ĠUs ing", + "Ġpre ced", + "Ġthreat ened", + "Ġspirit ual", + "ĠE vent", + "ĠF red", + "Ġadvert ising", + "Ġimprove ments", + "ĠC ustom", + "Ġer rors", + "Ġsens itive", + "ĠN avy", + "Ġcre am", + "L ook", + "Ġex clusive", + "Ġcomp rehens", + "Ġde leg", + "Ġcon ce", + "Ġrem em", + "Ġstruct ures", + "Ġst ored", + "N D", + "Ġ1 000", + "U P", + "ĠB udd", + "A F", + "w oman", + "ĠAcad emy", + "ð Ł", + "se a", + "Ġtem porary", + "Ab out", + "es ters", + "Ġtick ets", + "Ġposs ess", + "in ch", + "o z", + "Ġl a", + "Ġcontract s", + "Ġun p", + "Ġc ig", + "ĠK at", + "ult ural", + "as m", + "Ġmount ain", + "ĠCapt ain", + "St ep", + "m aking", + "ĠSp ain", + "Ġequ ally", + "Ġl ands", + "at ers", + "Ġreject ed", + "er a", + "im m", + "ri x", + "C D", + "Ġtrans action", + "g ener", + "less ly", + "Ġ| |", + "Ġc os", + "ĠHen ry", + "Ġprov 
isions", + "Ġg ained", + "Ġdirect ory", + "Ġra ising", + "ĠS ep", + "ol en", + "ond er", + "Ġcon sole", + "in st", + "Ġb om", + "Ġunc ertain", + "1 50", + "ock ing", + "Ġmeas ured", + "Ġpl ain", + "Ġse ats", + "Ġd ict", + "S L", + "af e", + "Ġest imate", + "iz on", + "at hered", + "Ġcontribut ed", + "Ġep isodes", + "omm od", + "G r", + "AN T", + "Ġ6 9", + "G ener", + "Ġ2 50", + "vious ly", + "rog en", + "Ġterror ism", + "Ġmove ments", + "ent le", + "oun ce", + "ĠS oul", + "Ġpre v", + "ĠT able", + "act s", + "ri ors", + "t ab", + "Ġsuff er", + "Ġn erv", + "Ġmain stream", + "ĠW olf", + "Ġfranch ise", + "b at", + "Ġdem ands", + "Ġag enda", + "Ġdo zen", + "Ġclin ical", + "iz ard", + "ĠO p", + "t d", + "Ġvis ited", + "ĠPer haps", + "Ġact or", + "Ġde lic", + "Ġcont ribute", + "Ġin ject", + "ĠE s", + "ac co", + "Ġlist ening", + "Ġcon gress", + "epend ent", + "Ġprem ium", + "Ġ7 6", + "ĠIr ish", + "Ġass igned", + "ĠPh ys", + "Ġworld wide", + "Ġnarr ative", + "ot ype", + "m ont", + "b ase", + "ĠB owl", + "ĠAdminist ration", + "Ġrel ation", + "ĠE V", + "C P", + "Ġco vers", + "Ġ7 8", + "Ġcert ific", + "Ġgr ass", + "Ġ0 4", + "pir acy", + "ir a", + "Ġengine ering", + "ĠM ars", + "Ġun employ", + "ĠFore ign", + "st ract", + "Ġv en", + "Ġst eal", + "Ġrepl ied", + "Ġult imate", + "Ġtit les", + "d ated", + "Ġj oy", + "a us", + "Ġhy per", + "ak u", + "Ġoffic ially", + "ĠPro duct", + "Ġdifficult y", + "per or", + "Ġresult ed", + "rib ed", + "l ink", + "wh o", + "~~ ~~", + "ĠSpe ed", + "ĠV iet", + "W ind", + "ĠBar ack", + "Ġrestrict ions", + "ĠSh are", + "Ġ199 5", + "ition ally", + "Ġbeaut y", + "op t", + "Ġm aps", + "ĠC R", + "ĠN ation", + "ĠCru z", + "W ill", + "Ġelectric ity", + "Ġor g", + "Ġb urd", + "Ġviol ation", + "Ġus age", + "Ġper mit", + "ĠCh ron", + "ĠF ant", + "Ġn aturally", + "Ġ0 7", + "Ġth rown", + "ĠAw oken", + "Ġal ien", + "ĠHer o", + "ĠK ent", + "ĠR ick", + "ri ke", + "Ġp ace", + "}, {\"", + "G L", + "Ġpo ison", + "ĠT ower", + "Ġform al", + "al ysis", + "Ġgen uine", + "Ġk il", + "a ver", + "Ġproced ure", + "ĠPro p", + "intend o", + "ĠM ain", + "as ant", + "Ġtr ained", + "G ame", + "ĠL oad", + "ĠM A", + "Ġcru cial", + "Ġle ts", + "ĠF R", + "Ġch ampion", + "1 01", + "ĠCon ference", + "Ġwrit ers", + "Ġconnect ions", + "Ġo kay", + "ir ms", + "ĠR and", + "Ġenc ounter", + "ĠB uff", + "Ġachie ved", + "Ġche cks", + "isc ons", + "Ġassist ant", + "Ġwhen ever", + "ĠA ccess", + "ĠU r", + "b in", + "Ġcl ock", + "is p", + "op her", + "Ġb orrow", + "Ġm ad", + "Ġperson ality", + "on ly", + "IS T", + "ab ama", + "Ġg ains", + "Ġcommon ly", + "Ġter r", + "Ġhyp ot", + "Ġre ly", + "Ġt iss", + "iscons in", + "Ġrid ic", + "f unction", + "ĠO regon", + "Ġun com", + "r ating", + "el and", + "ĠN C", + "Ġm oon", + "ann on", + "Ġvulner able", + "ut ive", + "³³ ³³", + "ĠRad io", + "Ġw estern", + "se ct", + "ĠT ony", + "Ġocc urs", + "ĠO s", + "ĠH on", + "à Ń", + "Ġv essel", + "ĠScot land", + "Ġdiscrim ination", + "Ġsubsequ ent", + "st ring", + "Ġfant asy", + "ĠSh adow", + "Ġtest im", + "W E", + "it i", + "r as", + "Ġbo at", + "Ġmar ks", + "Ġord inary", + "Ġre n", + "Ġrepresent ative", + "Ġpet ition", + "Ġ7 3", + "Ġad venture", + "Ġign ore", + "ĠPhil adelphia", + "ĠS av", + "V P", + "Ġfact ory", + "Ġt asks", + "Ġdep ression", + "z ed", + "................ 
................", + "ĠSt orm", + "Ġc ogn", + "Ġelig ible", + "Ġredu cing", + "v ia", + "Ġ0 5", + "Ġstri king", + "Ġdoll ar", + "h o", + "O V", + "Ġinstr ument", + "Ġphilosoph y", + "ĠMo ore", + "ĠA venue", + "Ġrul ed", + "ĠFr ont", + "IN E", + "ĠM ah", + "Ġscen ario", + "ĠNAS A", + "Ġen orm", + "Ġdeb ut", + "Ġte a", + "T oday", + "Ġabs ence", + "S im", + "Ġh am", + "le ep", + "Ġt ables", + "ĠHe art", + "M I", + "K e", + "re qu", + "V D", + "m ap", + "Ġchair man", + "Ġp ump", + "Ġrapid ly", + "v i", + "Ġsubstant ial", + "E P", + "d es", + "ch ant", + "ili pp", + "ĠS anta", + "ri ers", + "anche ster", + "L oad", + "ĠC ase", + "Ġsa ving", + "Ġ7 4", + "ĠA FP", + "er ning", + "oun ced", + "ĠMin nesota", + "ĠW as", + "Ġrec ru", + "Ġassess ment", + "ĠB ron", + "U E", + "Ġdynam ic", + "Ġf urn", + "ul ator", + "Ġprop ag", + "h igh", + "Ġacc ommod", + "Ġst ack", + "ĠS us", + "w rit", + "Ġre ven", + "ĠGod d", + "ĠZeal and", + "ab s", + "Ġbr ut", + "Ġper pet", + "h ot", + "Ġhard ly", + "ĠB urn", + "ãĤ ¹", + "Ġst y", + "Ġtrans actions", + "Ġg ate", + "Ġsc reens", + "Ġsub mitted", + "Ġ1 01", + "Ġlangu ages", + "ugh t", + "em en", + "Ġfall s", + "Ġc oc", + "Ĥ ¬", + "Ġstri kes", + "p a", + "Ġdel iber", + "ĠI M", + "Ġrel ax", + "ann els", + "ĠSen ator", + "Ġext rem", + "Ġ} ,", + "ĠDe b", + "Ġbe ll", + "Ġdis order", + "c ut", + "Ġi OS", + "Ġl ocked", + "Ġem issions", + "Ġshort ly", + "\" ]", + "ĠJud ge", + "ĠS ometimes", + "Ġr ival", + "Ġd ust", + "Ġreach ing", + "F ile", + "¯¯ ¯¯", + "ino is", + "ĠJ ason", + "Ġs atell", + "are t", + "Ġst ations", + "Ġag ric", + "ĠTechn ology", + "com es", + "ĠUn fortunately", + "ĠChild ren", + "Ġappl ies", + "ast ed", + "Ġan ger", + "ail ability", + "ĠDam age", + "Ġcomp are", + "ĠStand ard", + "Ġaim ed", + "ĠB a", + "angu age", + "Ġreg ulation", + "Ġj ury", + "Ġair port", + "Ġse ctions", + "ĠPr ince", + "em ed", + "Ġmedic ine", + "Ġh itting", + "Ġsp ark", + "ol ves", + "Ġad s", + "St ate", + "Ġfood s", + "Ġrepl acement", + "Ġch icken", + "Ġlow est", + "Ġmind s", + "Ġinvol ves", + "u i", + "Ġarr ang", + "Ġproced ures", + "ĠWh ich", + "ivers ary", + "Ġb ills", + "Ġimprove ment", + "Ġin ev", + "Ġexpect ations", + "Ġintellect ual", + "Ġsp aces", + "Ġmechan ism", + "2 50", + "bre ak", + "ĠZ e", + "ĠT enn", + "ĠB alt", + "Ġbar rel", + "Ġstat ic", + "man n", + "Pol ice", + "Ġt ips", + "Ġhand ling", + "c us", + "od ed", + "il ton", + "ir y", + "Ġjournal ists", + "our se", + "Ġcom ic", + "Ġnom ine", + "IT Y", + "Ġvers us", + "Ġlo op", + "Ġsur f", + "ĠInd ust", + "ĠHun ter", + "Ġbelief s", + "is an", + "Ġset up", + "Ġbre w", + "im age", + "Ġcomput ers", + "f ol", + "} ,\"", + "ĠMed al", + "Ġtax p", + "Ġdisplay ed", + "Ġg rav", + "Ġf iscal", + "M on", + "ĠMos cow", + "ĠK ong", + "ĠCent re", + "Ġcamer as", + "ĠMr s", + "ĠH ay", + "Ġa ver", + "ĠK elly", + "p y", + "Ġrequire ment", + "Ġent itled", + "omb ie", + "Ġsh adow", + "ag ic", + "ĠA k", + "Ġel ite", + "Ġdiv ided", + "Ġhead ing", + "Ġcop ies", + "Ġloss es", + "Ġv it", + "k ed", + "ĠB ry", + "Ġan s", + "ĠSte am", + "Ġrep orter", + "he im", + "ĠIt em", + "Ġsuper ior", + "d on", + "ere nt", + "à ¶", + "Ġtherap y", + "Ġpe ak", + "ĠMod el", + "Ġl ying", + "Ġg am", + "z er", + "r itten", + "Ġrespons es", + "Ġconsider ation", + "ĠB ible", + "Ġl oyal", + "Ġinst ant", + "Ġp m", + "ĠFore st", + "à ¼", + "Ġext end", + "Ġconv icted", + "Ġfound er", + "Ġconv in", + "ĠO ak", + "che ck", + "Ġsch olars", + "p ed", + "Ġover se", + "T op", + "c ount", + "ĠAr k", + " ·", + "Ġ0 6", + "ĠL A", + "m d", + "ĠLat in", + "im ental", + "ĠC PU", + "Ġsubst 
ance", + "Ġminor ity", + "Ġmanufact uring", + "E r", + "ocol ate", + "Ġatt ended", + "ĠMan ager", + "r ations", + "Ġappreci ate", + "om y", + "GB T", + "id ency", + "B L", + "Ġguarant ee", + "pos ition", + "Ġo cean", + "clud e", + "Ġhead ed", + "Ġt ape", + "Ġlo ose", + "Ġlog ic", + "Ġpro ven", + "Ġsp ir", + "Ġad mit", + "is a", + "Ġinvestig ate", + "Ġ199 4", + "sy lv", + "ĠL ost", + "c est", + "Ġ7 1", + "Ġrequest ed", + "Ġwind ows", + "ĠPok é", + "ĠWith out", + "M et", + "Ġbehavi our", + "Ġread er", + "Ġh ung", + "ĠKe ep", + "Ġro les", + "Ġimplement ed", + "Ġbl ank", + "Ġserv es", + "ĠJ ay", + "Ġc ited", + "ĠF riend", + "prof it", + "ap on", + "Ġrep air", + "it em", + "arr ass", + "Ġcrit ics", + "ad i", + "ĠF ather", + "Ġsh out", + "Ġf ool", + "Ġ8 8", + "Ġprodu cing", + "Ġl ib", + "Ġround s", + "Ġcirc le", + "Ġpre par", + "Ġsub mit", + "Ġn ic", + "mor row", + "ãĥ «", + "U nder", + "Ġv ital", + "ater n", + "Ġpass word", + "Ġpublic ation", + "Ġprom inent", + "Ġspeak s", + "Ġb ars", + "Ġde eper", + "ĠM ill", + "port ed", + "Ġw id", + "Ġbut ter", + "Ġsm oking", + "Ġindic ates", + "K ey", + "rop ri", + "ĠF ile", + "all ing", + "ast ing", + "ĠR us", + "Ġad j", + "Ġ7 9", + "av al", + "Ġpres um", + "bur gh", + "on ic", + "Ġf ur", + "Ġpoll s", + "ik a", + "Ġsecond ary", + "Ġmon ster", + "ig s", + "ĠCur rent", + "E vent", + "Ġowners hip", + "end ar", + "Ġarri ve", + "ĠT ax", + "Ġn ull", + "ĠPri v", + "Ġth ro", + "Ġk iss", + "c at", + "Ġup set", + "ang le", + "it ches", + "ect or", + "olog ists", + "ĠGal axy", + "Ġcor ruption", + "Ġh int", + "ent er", + "ĠH ospital", + "Ġgreat ly", + "Ġbeg un", + "es y", + "Ġso il", + "ĠAnt on", + "Ġmain tenance", + "ãĥ ©", + "Ġdo zens", + "Ġhuman ity", + "ĠAl abama", + "Ġr om", + "w orth", + "ap ing", + "sylv ania", + "l ah", + "Ġg athered", + "G A", + "Ġattack ing", + "f ound", + "ĠSqu are", + "Ġar bit", + "ict ions", + "ĠW isconsin", + "Ġd ance", + "ĠS aint", + "arch y", + "Ġbase ball", + "Ġcontribut ions", + "Ġliter ature", + "Ġex ha", + "per ty", + "t est", + "Ġb ab", + "Ġcontain er", + "let ter", + "Ġfall en", + "Ġwebs ites", + "Ġbott le", + "ĠS ac", + "Ġbre ast", + "ĠP L", + "Ġveter an", + "Ġinterview s", + "ĠA le", + "Ġb anned", + "eng ers", + "ĠRev olution", + "in th", + "Ġconc erning", + "IV E", + "Ġexp enses", + "ĠMatt hew", + "ĠColumb ia", + "d s", + "ist ance", + "Ġent ity", + ".. 
.\"", + "Ġrel iable", + "Ġpar alle", + "ĠChrist ians", + "Ġopin ions", + "Ġin du", + "l ow", + "Ġcompet e", + "Ġth orough", + "Ġemploy ed", + "Ġestablish ment", + "ig en", + "ĠC ro", + "Ġlawy ers", + "ĠSt ation", + "T E", + "ĠL ind", + "ĠP ur", + "it ary", + "Ġeffic iency", + "âĢ IJ", + "ĠL y", + "Ġm ask", + "Ġdis aster", + "Ġag es", + "ER E", + "es is", + "ĠH old", + "Ġcas ual", + "b led", + "Ġen abled", + "ĠEn vironment", + "ĠInt elligence", + "i per", + "ĠM ap", + "ĠB E", + "Ġemer ged", + "is dom", + "Ġc abin", + "Ġregist ration", + "Ġfing ers", + "Ġro ster", + "Ġfram ework", + "ĠDo ctor", + "et ts", + "Ġtransport ation", + "Ġaware ness", + "H er", + "Ġattempt ing", + "O ff", + "ĠSt ore", + "ÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤ", + "ĠK now", + "Ġdef ence", + "Ġsc an", + "ĠT en", + "ĠCh air", + "ĠP H", + "ĠAtl anta", + "Ġfuck ing", + "Ġans wered", + "b n", + "ĠK ar", + "Ġcateg ories", + "Ġr ational", + "Ġc ust", + "Ġrob ot", + "Ġcorrect ly", + "Ġg if", + "Ġgraph ics", + "m ic", + "Ġground s", + "ĠO pp", + "i ate", + "Ġdist ributed", + "Ġsan ctions", + "Ġchalleng ing", + "ut o", + "Ġingred ients", + "Ġinv ited", + "Ġfound ed", + "ĠRe qu", + "d ed", + "Ġb owl", + "Ġbrother s", + "ĠH a", + "I O", + "Ġw ages", + "im ore", + "oc ial", + "Ġse ed", + "ative ly", + "Ġaddress es", + "ĠI owa", + "ab eth", + "Ġatt itude", + "is d", + "ch ild", + "Ġm ole", + "Ġdisco very", + "y ard", + "B r", + "Ġ8 2", + "Ġsuppl ies", + "ell ing", + "Ġdist ingu", + "C R", + "Ġre cept", + "Ġ vert", + "Ġsw im", + "b ec", + "d oor", + "ĠY eah", + "Ġg al", + "Ġinter act", + "ĠE SP", + "ĠC S", + "amp s", + "Ġconvin ced", + "Ġobject ive", + "Ġdis h", + "ĠPhot os", + "l ad", + "Ġdownt own", + "o il", + "in ction", + "Ġto morrow", + "ĠC OM", + "Ġsurv ival", + "sh ot", + "Ġsett lement", + "C ons", + "ĠX box", + "int erest", + "ĠS M", + "arg o", + "en ess", + "Ġeth nic", + "b ered", + "M in", + "ĠT ok", + "Ġinc ent", + "ĠComm and", + "Ġmain tained", + "Ġbreak s", + "br idge", + "at ar", + "ag g", + "ĠF inally", + "un icip", + "ĠO nt", + "le ft", + "Ġrecogn ition", + "Ġ* /", + "ĠP ers", + "Ġwe lf", + "Ġaddress ed", + "ĠK ansas", + "Ġvir us", + "Ġwhere as", + "Ġp apers", + "ram s", + "ĠMin istry", + "Ġple asure", + "Ġacqu ired", + "Ġd uration", + "j pg", + "Ġcal m", + "ĠN HL", + "Ġburn ing", + "Ġfold er", + "ick ed", + "ĠP y", + "ĠIll inois", + "Cl ass", + "ĠGodd ess", + "Ġperform ing", + "Ġwelf are", + "j ar", + "In ter", + "Ġl in", + "Ġenh ance", + "Ġnot ion", + "f are", + "yp es", + "ĠAre a", + "Ġcann abis", + "ĠDie go", + "f s", + "ĠM anchester", + "com m", + "in ite", + "Ġcover ing", + "ĠS ound", + "Ġ19 60", + "Ġ8 4", + "e lect", + "z ing", + "Ġcitiz en", + "Ġph ones", + "Ġr aid", + "Ġign ored", + "ĠOb ject", + "Ġu pload", + "c ard", + "Ġmod ified", + "Ġroom s", + "ia h", + "r ange", + "he ast", + "ach us", + "Ġsuggest ing", + "âĢ ĭ", + "gr ade", + "E l", + "Ġclot hing", + "Ġr h", + "ĠH an", + "un ity", + "en cing", + "ĠAust in", + "sec ution", + "t ra", + "d em", + "ĠQ ual", + "Ġhe aven", + "Ġst ages", + "Ġw edd", + "pl us", + "ific ial", + "ĠIm m", + "ĠH o", + "iet ies", + "Ġphr ase", + "Ġbr ill", + "act ory", + "Ġprov iders", + "Ġsil ence", + "Ġa er", + "ĠA I", + "ĠAd venture", + "Ġplatform s", + "Ġdemonstr ated", + "Ġinter f", + "ing ton", + "Ġr aces", + "Ġgr ade", + "ult ane", + "ĠTh rough", + "f alse", + "Ġb ow", + "ĠA B", + "Ġfl avor", + "Ġhistor ic", + "g ov", + "Ġcol our", + "Ġview ed", + "ĠEm ail", + "el come", + "Ġinter vention", + "Ġd iversity", + "Ġperiod s", + "Ġre verse", + "ĠV ery", + "Ġqu ote", + "ĠLe ft", + "th rough", + 
"Ġsc rew", + "Ġland ing", + "Ġp ill", + "Ġw et", + "Ġprot esters", + "Ġrepe at", + "av ed", + "er k", + "Ġsal ary", + "ĠPenn sylvania", + "St ill", + "Ġmay or", + "Ġkit chen", + "Ġfeat uring", + "ĠM useum", + "ĠT ournament", + "ĠF al", + "Ġser vers", + "U C", + "Ġany body", + "im g", + "ĠTr ade", + "ixt ure", + "the less", + "Ġfin ance", + "Ġcl osing", + "ĠPat ri", + "i ac", + "ab el", + "Ġ> >", + "or ous", + "Ġf irms", + "sc reen", + "un a", + "Ġemb arrass", + "ul se", + "Ġlet ting", + "Ġth rew", + "ile y", + "Ġch annels", + "l an", + "ĠVeg as", + "Ġse ar", + "Ġfant astic", + "ar re", + "uzz le", + "ĠD er", + "Th ose", + "Ġsw ing", + "Ġshe et", + "ind ex", + "co ver", + "og an", + "Ġvari ables", + "ĠTe ch", + "Ġsp oken", + "ac hel", + "ĠD a", + "ĠMount ain", + "Ġload ed", + "Ġfoot age", + "vers ion", + "Ġun l", + "ĠPh oenix", + "Ġthrow ing", + "Ġf iring", + "Ġtrack ing", + "Ġw idth", + "Ġstrugg ling", + "ro oms", + "ot ion", + "Ġmonth ly", + "ĠSer ver", + "Ġegg s", + "op en", + "M C", + "Ġ199 3", + "Ġh ired", + "Ġstay ed", + "ĠAll en", + "Ġst ro", + "Ġ9 8", + "st ep", + "ĠTurk ish", + "Ġfab ric", + "ist ing", + "ĠD om", + "Ġd ates", + "Ġpr on", + "Ġbasket ball", + "Ġl ucky", + "ĠArab ia", + "Ġassum ed", + "est y", + "Ġaff airs", + "Ġgl ad", + "ĠInd eed", + "ĠF A", + "ĠW ord", + "Ġjo ining", + "if ice", + "p read", + "ir ts", + "ĠSe lect", + "Ġpop ulations", + "aw are", + "Ġn ose", + "Ġcompl aints", + "st art", + "Ġsc oring", + "Th anks", + "Ġmin ing", + "Ġvisit ors", + "S H", + "Ġdam aged", + "Ġcharacter istics", + "ĠP ent", + "D C", + "Ġ8 3", + "ĠS ix", + "r ates", + "Ġfl ags", + "ĠB rew", + "d og", + "M ark", + "// //", + "Ġexec ution", + "Ġj oke", + "ph ones", + "Ġtestim ony", + "Ġob st", + "Q L", + "ĠC ut", + "Ġstud ied", + "ĠN intendo", + "ick et", + "ĠN BC", + "Ġl ad", + "ĠB ra", + "ĠM oh", + "Ġk ernel", + "Ġoverwhel ming", + "Ġag ed", + "Ġapplic able", + "ĠC ond", + "Ġroad s", + "ĠBl ock", + "m ade", + "od ge", + "Ġcomm ands", + "Ġoff ices", + "vel and", + "Ġt ut", + "Ġrece iver", + "ĠF ro", + "Ġsho pping", + "Ġi P", + "ĠSt re", + "ĠA BC", + "Ġentertain ment", + "ĠB ow", + "ort ed", + "M c", + "Ġread s", + "gr ad", + "ĠCol lect", + "Ġâ ĪĴ", + "ĠCap ital", + "eder ation", + "Ġemploy er", + "Ġinvolve ment", + "Ġanx iety", + "al ia", + "Ġro of", + "ĠAm ong", + "ĠDemocr at", + "Ġstat s", + "ĠV ill", + "Ġconst itutional", + "Ġrefer ring", + "itt y", + "Ġtack le", + "out ube", + "Ġback ed", + "ĠH ong", + "ĠBro ad", + "Ġe le", + "ĠO tt", + "Ġ199 2", + "h our", + "achus etts", + "C al", + "Ġdefe ated", + "Ġ8 1", + "es p", + "Ġseem ingly", + "w as", + "ĠJ enn", + "ĠK urd", + "Ġg ene", + "Ġdisc ount", + "R et", + "EC T", + "( );", + "Ġclub s", + "Ġs id", + "ĠM arsh", + "Che ck", + "Ġp p", + "ĠE ag", + "ides pread", + "Ġbe ings", + "F T", + "Ġintrodu ction", + "ĠCh ange", + "AR D", + "Ġ1 10", + "ad ows", + "ier ce", + "Ġme al", + "a uthor", + "ĠB ang", + "lah oma", + "Ġr anks", + "201 1", + "?? ??", + "m ax", + "Ġcoll apse", + "Ġop ens", + "Ġe cho", + "Ġs oph", + "Ġrac ist", + "Ġenorm ous", + "Ġw aves", + "Ġt ap", + "Ġcomprehens ive", + ". 
--", + "ĠR oy", + "Ġfarm ers", + "Rel ated", + "a ired", + "ron es", + "ĠC rim", + "Ġproport ion", + "Ġdesign s", + "Ġnegoti ations", + "Ġvirt ually", + "ĠBat man", + "Ġwar n", + "Ġlegit imate", + "m ate", + "Ġcon vention", + ", ,", + "net ic", + "ĠS D", + "Ġconsist ently", + "Ġcompens ation", + "Ġpunish ment", + "Ġy e", + "Ġt ie", + "ĠB ureau", + "ir lf", + "ĠB u", + "ĠA ren", + "ĠPh ilipp", + "Ġkn ife", + "Ġmem ories", + "ĠR oss", + "Ġang le", + "Ġ8 6", + "ĠTh under", + "Ġre nd", + "ĠT our", + "Ġcount s", + "s ung", + "ĠIm p", + "Ġeduc ational", + "Ġaccess ible", + "C OM", + "Ġd rew", + "y er", + "G l", + "am ine", + "OR T", + "O B", + "I B", + "m aster", + "Ġtri als", + "og y", + "h ar", + "ĠTr ust", + "Ġprefer red", + "irlf riend", + "ĠN ev", + "Ġb in", + "Ġc ow", + "P age", + "Ġsign ature", + "ĠB L", + "7 00", + "Ġret ired", + "Ġby tes", + "Ġneigh b", + "ĠLeg end", + "Ġdev ast", + "Ġsuspect ed", + "is ons", + "ĠPoké mon", + "sc ale", + "Ġcap abilities", + "Ġre vel", + "Ġche ese", + "d y", + "igr ant", + "Ġfail ing", + "b its", + "ĠHer oes", + "ĠG host", + "ĠS cient", + "Ġappoint ed", + "ur i", + "Ġinst itution", + "Ġexpand ed", + "g reg", + "Ġmonitor ing", + "Ġp odcast", + "Ġcoal ition", + "Ġ9 6", + "J o", + "Ġst olen", + "ĠS ab", + "Ġstop s", + "Ġhol iday", + "Ġint r", + "C ar", + "Bl ack", + "ĠL GBT", + "Ġwar ming", + "ĠAnd erson", + "Ġ8 9", + "Ġprodu cer", + "M ed", + "Ġaccur acy", + "ĠMar vel", + "iz abeth", + "ĠPat rick", + "m ony", + "Ġmin i", + "ac les", + "Ġover t", + "the y", + "Ġmembers hip", + "ĠV en", + "Ġex ch", + "Ġrem oval", + "ĠD ave", + "T Y", + "m ad", + "ĠF ind", + "Ġad equ", + "Ġe c", + "Ġte eth", + "Ġemot ion", + "Ġper m", + "Ġsole ly", + "d b", + "Ġextra ord", + "IG HT", + "c al", + "Ġgu idelines", + "Ġd ying", + "Ġsusp ended", + "ĠPrem ier", + "ĠAnth ony", + "el ve", + "Ġd ad", + "ĠE th", + "ĠFoot ball", + "Ġabandon ed", + "Ġ< <", + "Ġm arch", + "Ġhor ror", + "âĢ¦ \"", + "Ġchild hood", + "Ġcampaign s", + "Ġl unch", + "ĠAl bert", + "bl ock", + "âĸĪ âĸĪ", + "ound ing", + "Ġb one", + "or gan", + "ad ers", + "ĠFl ash", + "ĠDri ve", + "Ġton ight", + "Ġw ars", + "ĠF L", + "Ġform ation", + "con st", + "New s", + "Ġcom pe", + "or ious", + "ĠSt aff", + "Ġdiscuss ions", + "ĠProt ection", + "ĠJ am", + "Ġcrit eria", + "Ġinstall ation", + "Ġaccompl ish", + "iz za", + "Ġpub lisher", + "Ġresc ue", + "ĠT ry", + "U LL", + "ĠS om", + "ĠH op", + "ore t", + "th s", + "ord on", + "Ġp ocket", + "ĠIn v", + "Down load", + "ĠCr ime", + "Ġb ene", + "ĠGu ide", + "ĠAs sembly", + "Ġparam eters", + "I E", + "ĠAlex ander", + "Ġconc ert", + "ĠSc he", + "Ġsh oes", + "Ġvis iting", + "Ġrec all", + "Ġb ub", + "Ġr ural", + "Ġconc rete", + "ĠR os", + "N ext", + "R uss", + "Ġlo ans", + "ĠSh ield", + "Ġtre m", + "hem at", + "k g", + "ĠHar ris", + "is ition", + "ĠM ove", + "ĠF C", + "Ġf ate", + "ĠCh o", + "Ġt ired", + "Ġprinc ipal", + "h ist", + "ien ces", + "ath y", + "Ġse vent", + "Ġm ood", + "Ġstrateg ic", + "Ġdise ases", + "Ġfor um", + "Ġtem por", + "Ġhead quarters", + "P ar", + "ig e", + "fl ix", + "Ġgu itar", + "Ġ9 4", + "On ly", + "Ġrele ases", + "ro ph", + "================ ================", + "Ġ6 00", + "ĠContin ue", + "ig ate", + "ĠC rit", + "sy stem", + "Ġdis abled", + "Ġunex pected", + "ith ub", + "Ġuncle ar", + "ĠE st", + "Ġcontr ad", + "Ġstrateg ies", + "vent ures", + "Ġpass age", + "AM E", + "Ġimpro ving", + "Ġreve als", + "Ġdecre ase", + "ov a", + "Ġann oy", + "ĠSh ort", + "ĠL ibrary", + "Ġcy ber", + "n ell", + "ĠH ur", + "ĠC B", + "Ġphot ograp", + "U I", + "Ġs ed", + "G e", + "Ġ8 7", 
+ "Ġd iverse", + "Ġencour aged", + "Ġcons piracy", + "Ġbird s", + "Ġoper ator", + "Ġhand ful", + "Ġclass ified", + "? )", + "Ġdram atic", + "Ġinvestig ators", + "it o", + "Ġw idespread", + "ĠR oom", + "-------------------------------- --------------------------------", + "Ġcollect ive", + "Ġjournal ist", + "St ring", + "Ġtemper atures", + "il a", + "Ġgu id", + "Ġins pect", + "Ġmiss ile", + "ĠMay or", + "Ġman ual", + "Ġsim ultane", + "Ġrat ings", + "Ġsu ck", + "Ġ9 7", + "Ġunivers al", + "Ġph arm", + "Ġdis rupt", + "ian o", + "A V", + "Ġf t", + "Ġstat ist", + "old s", + "ĠWalk er", + "ph p", + "Ġunder t", + "ĠL as", + "ish op", + "nt il", + "res hold", + "ĠWhe ther", + "M s", + "Ġden y", + "ĠCl oud", + "Ġprov ider", + "Ġsurv iv", + "ĠUp date", + "h as", + "Ġmist akes", + "ch arge", + "pl ed", + "r ity", + "Ġn ode", + "ĠMass achusetts", + "ool s", + "lic ation", + "Ġf ails", + "em ale", + "or i", + "back s", + "Ġsh irt", + "Ġ' '", + "ĠN AT", + "Ġwat ers", + "els on", + "Ġe ase", + "Ġsc ar", + "Ġcont ents", + "m ind", + "Ġcont ribution", + "Ġsh r", + "Ġhand ed", + "Ġst ability", + "Ġtra ve", + "E m", + "Ġmir ror", + "12 3", + "Ġwe igh", + "Ġf iction", + "ou ver", + "ist ant", + "r ition", + "ĠF ed", + "Ġphys ically", + "Ġst ake", + "ĠArt icle", + "ĠAr c", + "ĠLew is", + "ĠM ind", + "Ġdemonstr ate", + "Ġprof its", + "v ision", + "om ic", + "ol id", + "Ġbatt les", + "Ġdri ves", + "Ġeas tern", + "ĠS ony", + "!! !", + "ar ation", + "v ard", + "ĠG L", + "port ation", + "Ġ9 2", + "Ġlaw makers", + "Ġprotect ing", + "ĠE PA", + "Ġy eah", + "Ġsh ame", + "ol ph", + "e ven", + "x it", + "Ġatt ach", + "Ġrepresent ing", + "Ġob s", + "ĠUt ah", + "iff s", + "ĠFre edom", + "à ³", + "A K", + "Ġinc idents", + "it age", + "Ġview ers", + "c d", + "Ġm ouse", + "Ġcl ar", + "Ġaccord ance", + "Ġb ot", + "c or", + "ĠSum mer", + "he ld", + "Ġinnoc ent", + "Ġiniti ative", + "ol s", + "________________ ________________", + "Ġsp ots", + "p ace", + "Ġconvent ional", + "Ġcorpor ations", + "Ġblock ed", + "H D", + "at tered", + "Ġref ers", + "Ġbu ck", + "ĠDig ital", + "12 0", + "Ġtop ics", + "T F", + "Ä ģ", + "br id", + "re ement", + "Ġunder lying", + "ĠM ember", + "Ġinvestig ating", + "Ġpregn ancy", + "Ġtouch down", + "ĠB and", + "ĠCall er", + "Ġinst ances", + "P P", + "w a", + "G ood", + "Ġ199 1", + "ĠC old", + "Ġfear s", + "Ġrem arks", + "Ĩ Ĵ", + "at al", + "Ġm it", + "Ġexper iments", + "i pt", + "Col or", + "ind u", + "Up date", + "Ġ9 3", + "A g", + "Ġ å", + "anc ouver", + "B oth", + "Ġjud ges", + "Ob ject", + "Ġst ere", + "umb n", + "Ġparticip ation", + "ĠSt ars", + "ĠJ ere", + "Ġweek ly", + "ĠB an", + "Ġconvers ations", + "ĠP itt", + "u z", + "ĠIndian a", + "ĠK ick", + "Ġinf ection", + "Ġhero es", + "Ġsett led", + "Ġstri p", + "Ġh al", + "Ġd ump", + "ĠS ci", + "Ġl es", + "Ġref erences", + "ĠU RL", + "ĠBr idge", + "Ġwant ing", + "For ce", + "Ġex clus", + "Me anwhile", + "m n", + "Ġg entle", + "m aker", + "sen al", + "ĠG ro", + "ou ri", + "ĠR ain", + "ĠAll iance", + "Ġl ift", + "el a", + "S D", + "ĠCle veland", + "Ġrank ed", + "Ġst adium", + "Ġdead ly", + "ä ¸", + "Ġr iding", + "ar ia", + "ĠAr mor", + "Ġdocument ation", + "ĠGree ce", + "ree k", + "Ġl ens", + "ĠS a", + "Ġg ross", + "ĠE mer", + "ag ers", + "ĠD ub", + "ĠR h", + "ĠAM D", + "Ġarri val", + "Ġdes ert", + "Ġsupp lement", + "ĠRes p", + "Ġkn ee", + "Ġmarg in", + "f ont", + "og g", + "201 0", + "ĠP ir", + "ĠP rom", + "iv als", + "Ġint ake", + "Ġdifferent ly", + "ug s", + "Ġb its", + "clud ed", + "Ġsearch ing", + "ĠD u", + "um ble", + "Ġfunction al", + "ĠBalt imore", 
+ "ĠC ould", + "Ġdes ired", + "Ġcirc uit", + "ĠL yn", + "ĠG O", + "ĠF alse", + "re pre", + "' :", + "alt ies", + "Ġmin im", + "Ġdro ve", + "ĠSh ould", + "Ġh ip", + "Ġpro s", + "Ġut ility", + "ĠN ature", + "ĠM ode", + "P resident", + "o pp", + "r at", + "form ance", + "Ġconcent ration", + "Ġf ont", + "ĠB ud", + "Ġam id", + "Ġre vers", + "ĠM L", + "B ar", + "Ġinter action", + "Ġjur isd", + "Ġspell s", + "d ep", + "f il", + "Ġcivil ians", + "ut ter", + "ĠCo oper", + "ĠBel ow", + "Ġent rance", + "Ġcon vert", + "Ġcontrovers y", + "ow ered", + "Ġcontr ary", + "Ġar c", + "ĠExec utive", + "ĠOffic er", + "Ġpack ages", + "Ġprog ressive", + "w idth", + "Ġreserv ed", + "v ol", + "ĠSam sung", + "Ġprint ed", + "Ġcent ers", + "Ġintrodu ce", + "ĠKenn edy", + "Ġodd s", + "Ġsure ly", + "Ġindepend ence", + "Ġpass engers", + "repre ne", + "ĠBe h", + "Ġl oves", + "ĠESP N", + "Ġfac ilit", + "Ġident ical", + "Ġdo ct", + "Ġpartners hip", + "con f", + "ĠH ide", + "Ġconf used", + "ĠC ow", + "M en", + "Ġw rest", + "ĠIraq i", + "Ġh oles", + "ĠStud ies", + "Ġpregn ant", + "h ard", + "Ġsign als", + "I X", + "Ġpull ing", + "Ġgrad uate", + "Ġnomine e", + "D ate", + "Ġper mitted", + "Ġâ Ĥ¬", + "ĠOk lahoma", + "St art", + "Ġauthor ized", + "Ġal arm", + "ĠC os", + "v an", + "Ġgener ations", + "c ular", + "Ġdr agon", + "ĠSoft ware", + "ĠEd ward", + "Ġcontro ller", + "S en", + "ge red", + "ĠV ik", + "Ġappro ached", + "Th ank", + "Ġcan ce", + "Ġform ula", + "ĠSm all", + "Ġweak ness", + "Ġr amp", + "it udes", + "j ud", + "Ġbrill iant", + "Ġacc us", + "s ource", + "Ġ8 00", + "ĠE vil", + "S w", + "Ġhom eless", + "we ek", + "i ens", + "r ics", + "ĠTh ird", + "T O", + "Ġorgan ic", + "Ġpresent ation", + "ag h", + "ĠDown load", + "v ation", + "Ġas sembly", + "or able", + "hold ers", + "ĠBern ie", + "ĠHel p", + "Ġt ong", + "ĠF ight", + "Ġbe ach", + "B ook", + "ĠL ic", + "Ġr ush", + "ĠR ound", + "ou p", + "ĠMar x", + "Ġcalcul ated", + "ĠDe vil", + "ĠSar ah", + "Ġoccasion ally", + "Ġbul let", + "Av ailable", + "g ate", + "Ġ9 1", + "Ġh osp", + "Ġprom ises", + "ĠH IV", + "ĠSt adium", + "ĠSt ock", + "ĠCorpor ation", + "g age", + "N G", + "ĠC redit", + "Ġs ne", + "ib l", + "Ġacc um", + "s uch", + "Ġterror ists", + "Ġconscious ness", + "ĠZ h", + "Ġdram a", + "ool a", + "pir ation", + "Ġlab our", + "ĠN in", + "Ġut ter", + "Ġdemocr atic", + "Ġass ass", + "il ation", + "Ġg est", + "Ġab road", + "Ġmet ab", + "Ġs orts", + "Ġfl av", + "U B", + "Ġm g", + "ĠNot hing", + "ĠO d", + "Ġmus ical", + "200 9", + "Ġdro ps", + "oc ated", + "ater al", + "0000 00", + "Ġg re", + "Ġequ ality", + "Ġburd en", + "Ġv ig", + "ĠLe ader", + "-------- ----", + "Ġcere mony", + "Ġf ighter", + "Ġact ors", + "Ġ æ", + "am an", + "F i", + "Ġal ign", + "put er", + "Ġe lder", + "ĠN SA", + "Ġrepresent ation", + "ĠOnt ario", + "IT H", + "usal em", + "Ġharass ment", + "itz er", + "Ġsy mp", + "Ġbox es", + "ĠD R", + "Ġman ifest", + "at re", + "Ġ ^", + "Ġd ies", + "le ton", + "Ġmiss ions", + "et he", + "Ġres olve", + "Ġfollow ers", + "Ġas c", + "Ġk m", + "l ord", + "am med", + "Ġsil ent", + "ĠAssoci ated", + "Ġtim ing", + "Ġprison ers", + "ĠK ings", + "ĠF ive", + "Ġtow er", + "Ġappro aches", + "Ġprecise ly", + "Ġb ureau", + "ĠM other", + "ĠI ss", + "Ġkey board", + "it ual", + "Ġfund ed", + "Ġstay ing", + "Ġpsych ological", + "Ġm ile", + "ĠLe on", + "ĠBar b", + "w ill", + "Ġw ider", + "ĠAtl antic", + "Ġt ill", + "ĠR ome", + "ro t", + "Ġaccomp an", + "Ġfl our", + "ac o", + "W orld", + "ĠExp ress", + "ĠY u", + "C or", + "Ġple ased", + "part y", + "Ġpoint ing", + "Ġinf lation", + "Ġro 
y", + "Ġ ),", + "ain er", + "Ġwedd ing", + "orm on", + "Ġrequ iring", + "Ġqual ified", + "Ġse gment", + "EN D", + "Ġs izes", + "e als", + "Ġcor rupt", + "ass ador", + "Ġcele b", + "Ġdream s", + "ĠM ess", + "Ġcheck ing", + "ĠV ersion", + "Ġprep aring", + "Ġact ively", + "ĠD iff", + "Ġl ux", + "ĠW inter", + "act eria", + "ĠN E", + "Ġdep uty", + "Ġtrans gender", + "Ġsum mary", + "Ġin her", + "er ies", + "ch ar", + "ĠY an", + "Ġkn ock", + "ĠP ath", + "Ġl ip", + "roll er", + "Ġimp ression", + "Ġcelebr ate", + "Ġsl ide", + "Ġgu ests", + "Ġcl ip", + "F S", + "Ġsav ings", + "Ġcapt ain", + "Ġleg acy", + "ĠDen ver", + "Ġw ounded", + "tab oola", + "AC T", + "Ġpurs ue", + "Ġo xy", + "Ġ q", + "Ġsem i", + "ĠN eed", + "ĠAff airs", + "Ġob sc", + "Ġcheck ed", + "Ġd ual", + "C ode", + "ĠM D", + "le m", + "ult y", + "Ġ ©", + "ĠEl izabeth", + "Ġcent uries", + "ard ed", + "s rc", + "Ġev ident", + "enn is", + "at in", + "Ġunemploy ment", + "ĠMar io", + "Ġint im", + "Ch rist", + "Ġbi ological", + "Ġsold ier", + "ĠAdd ed", + "Ġm ath", + "ĠG il", + "Ġbi as", + "Ġd ating", + "ĠO cean", + "Ġm ice", + "M us", + "h ire", + "ĠT es", + "Ser ver", + "lim ited", + "S ize", + "Ġmet ers", + "Ġrock et", + "es see", + "Ġcertific ate", + "ĠIran ian", + "AS S", + "Ġgr id", + "D ec", + "Ġro lling", + "com mun", + "ĠSwed en", + "b ury", + "Ġtiss ue", + "Ġrac ism", + "ĠL ocal", + "Ġmyster y", + "Ġexam ine", + "Ġst em", + "Ġs its", + "Ġhop ed", + "ot ing", + "Ġdial ogue", + "Ġpers u", + "W atch", + "l ay", + "M AN", + "Ġch ronic", + "ĠPort land", + "mark et", + "ĠS EC", + "Ġparalle l", + "Ġsc andal", + "Ġcar ries", + "Ġphenomen on", + "h uman", + "ack er", + "ĠO x", + "Ġretire ment", + "tain ment", + "ov ie", + "ĠG ear", + "Ġd uties", + "Ġdo se", + "Ġsc roll", + "M B", + "in f", + "Ġsa uce", + "Ġland scape", + "red dit", + "ĠChampions hip", + "ĠRed dit", + "al id", + "Ġco in", + "Ġover s", + "Ġpost ing", + "ab out", + "Ġf el", + "and y", + "Ġb old", + "Ġfocus ing", + "e ffect", + "G R", + "Ġde emed", + "Ġrecommend ations", + "Ġste pped", + "Ġvot er", + "ĠDe ep", + "ĠInst agram", + "Ġmoder ate", + "ĠMary land", + "Ġrestrict ed", + "ĠM B", + "ĠCh all", + "Ġto b", + "Ġc ir", + "ĠO cc", + "ĠE ver", + "Ġcoll aps", + "IN FO", + "= -", + "ĠP ict", + "ĠAcc ount", + "n c", + "Ġo ught", + "Ġex port", + "Ġdr unk", + "( '", + "Ġw ise", + "ĠM ort", + "ne cess", + "Ġan cest", + "ĠInc re", + "Ġfrequ ent", + "m ir", + "Ġinterpret ation", + "Ġdepend ent", + "Ġco ins", + "ĠB ol", + "V ideo", + "ĠJust in", + "Ġfat al", + "Ġcook ing", + "Ġconf usion", + "ip her", + "Ġcust ody", + "ĠMor gan", + "om ach", + "ĠGovern or", + "Ġrestaur ants", + "el ing", + "Ġacknowled ged", + "Ġthe r", + "Ġgen es", + "ch ing", + "He y", + "Ġtact ics", + "ĠMex ican", + "Ġv end", + "Ġhe s", + "qu er", + "Ġnot ing", + "ĠCamer on", + "Ġtarget ing", + "ro ck", + "Ġcred its", + "Ġemot ions", + "Ġrepresent atives", + "new s", + "Ġlegisl ative", + "Ġrem oving", + "Ġtweet ed", + "ĠCar ter", + "ĠF ixed", + "Ġfor cing", + "Ġspeak er", + "Ġm ales", + "ĠViet nam", + "l ined", + "Ġconcept s", + "Ġvo ices", + "o ir", + "ĠT rib", + "W he", + "ĠJer usalem", + "ĠS ant", + "Ġc ul", + "Ġl ady", + "ĠHaw ai", + "Ġar ts", + "ĠIn n", + "ĠMach ine", + "ĠEm peror", + "Ġsl ot", + "g ly", + "ĠPro cess", + "II I", + "Ġathlet es", + "ĠTem ple", + "ĠRep resent", + "Ġpres c", + "Ġt ons", + "Ġgold en", + "Ġp unch", + "ĠG R", + "iver pool", + "Ġen act", + "Ġlob by", + "Ġm os", + "Ġpick ing", + "Ġlif etime", + "Ġcogn itive", + "E ach", + "z o", + "Ġd ub", + "Ġcons ists", + "ol n", + "Ġf estival", + "am 
ous", + "Ġint ellig", + "w ords", + "ĠSm art", + "Ġde le", + "Ġl apt", + "Ġmag ical", + "ĠS in", + "b us", + "ur ities", + "igh th", + "ĠRub y", + "ĠS ure", + "ol ving", + "Ġj un", + "O ST", + "Ġimp osed", + "Ġast ron", + "Ġcor rel", + "ĠN S", + "ĠK it", + "ĠF uture", + "b urn", + "Ġimm une", + "oc us", + "Ġcour ses", + "ĠSt ring", + "Ġle an", + "Ġg host", + "Ġout comes", + "Ġexp ense", + "Ġevery day", + "Ġaccept able", + "A h", + "Ġequ ipped", + "Ġor ange", + "F R", + "ĠD utch", + "Th ough", + "ĠR ank", + "Q U", + "ĠRober ts", + "wh at", + "re nd", + "Ġdisapp ear", + "Ġsp awn", + "ĠL am", + "o is", + "Ġdes erve", + "Ġmin imal", + "Ġnerv ous", + "ĠW ould", + "Ġro ok", + "ĠV ancouver", + "Ġres ign", + "sh ire", + "ĠW orks", + "ĠB uild", + "Ġafford able", + "ĠG ary", + "ĠAren a", + "Ġh anging", + "Ġimpl ications", + "ĠS ong", + "Ġmain taining", + "Ġgu ards", + "C ON", + "Ġder ived", + "Ġexecut ed", + "Ġthe ories", + "Ġqu oted", + "ĠAnd re", + "og a", + "sel ess", + "in fo", + "ĠBel g", + "Ġt ears", + "ĠSur v", + "Ġbirth day", + "ig ious", + "im mer", + "Ġspect rum", + "Ġarchitect ure", + "Ġrec ruit", + "arm a", + "T able", + "Ġmon sters", + "ĠG ov", + "Ġdest ination", + "Ġattract ive", + "Ġf oss", + "ĠMore over", + "Ġpres ents", + "TH E", + "Ġrep ly", + "pt on", + "Ġc um", + "Ġdel ight", + "Ġaffect s", + "Ġdon ations", + "ĠT oy", + "ĠH im", + "M ENT", + "Ġover come", + "it ched", + "ĠFant asy", + "ĠH at", + "ĠBe ast", + "b ott", + "Ġinvestig ations", + "R un", + "Ġhun ting", + "d i", + "f und", + "Ġs essions", + "est yle", + "Ġport ray", + "oid s", + "Y eah", + "Ġcommun icate", + "Ġcom edy", + "ĠY ang", + "Ġbel t", + "ĠMar ine", + "Ġpredict ed", + "Pl ay", + "Ġimportant ly", + "Ġremark able", + "Ġelim inate", + "D avid", + "Ġb ind", + "V ID", + "Ġadvoc ates", + "ĠG aza", + "im p", + "D B", + "ĠN a", + "ĠSim ilar", + "I ES", + "Ġchar ity", + "v as", + "m ath", + "Ġâ ĸ", + "ok er", + "nd um", + "Ġcap s", + "ĠH al", + "2 000", + "e an", + "Ġfle et", + "Ġrec re", + "R ight", + "Ġsleep ing", + "ij ing", + "k ind", + "Ġdesign ated", + "à ¤", + "Ġanim ation", + "ke e", + "ĠInt rodu", + "Ġ/ >", + "Ġdelay ed", + "Ġtrem end", + "Ġcur ious", + "U se", + "Ġle ct", + "d am", + "Ġinnov ation", + "ĠPoint s", + "Ġload ing", + "Ġdisp ute", + "ct ic", + "ird s", + "ĠB Y", + "Ġn urs", + "ĠVal ue", + "ION S", + "ĠH um", + "Ġtem plate", + "m ers", + "Ġappear ances", + "ĠEnter tainment", + "Ġtransl ation", + "Ġsa ke", + "Ġbene ath", + "Ġin hib", + "Ġe uro", + "abet es", + "Ġstud ying", + "ĠM as", + "Ġper ceived", + "Ġexam ined", + "Ġe ager", + "Ġco aches", + "Ġim per", + "ch i", + "Ġprodu ces", + "\" ).", + "ĠEvery one", + "Ġm unicip", + "Ġg irlfriend", + "Ġh ire", + "ĠV ice", + "Ġsu itable", + "op y", + "Ġin equ", + "ĠD uke", + "f ish", + "f irst", + "ĠO bs", + "Ġinter ior", + "ĠBru ce", + "ĠR y", + "Ġanal ys", + "Ġconsider able", + "Ġfore cast", + "Ġf ert", + "ors hip", + "ĠD rug", + "ĠA LL", + ": \"", + "th ur", + "ĠM ail", + "Ġball ot", + "Ġinst antly", + "ĠCh annel", + "Ġp icks", + "Ġ198 9", + "Ġt ent", + "ol i", + "Ġcivil ian", + "b ling", + "ell o", + "b u", + "Ġin ch", + "Ġlog o", + "Ġcooper ation", + "Ġwal ks", + "Ġinvest ments", + "Ġimp rison", + "ĠF estival", + "ĠK y", + "Ġleg ally", + "Ġg ri", + "ch arg", + "S l", + "Ġthreat ening", + "du ction", + "fl ow", + "Ġdismiss ed", + "ibr aries", + "c ap", + "e le", + "ĠMc G", + "ĠHar vard", + "ĠConserv ative", + "ĠC BS", + "p ng", + "Ġro ots", + "ĠH aving", + "umb led", + "ĠF un", + "\\ /", + "ĠS earch", + "ple x", + "Ġdiscuss ing", + "Ġcontin u", + "ĠT ai", 
+ "ĠW ik", + "F ree", + "f it", + "Ġref use", + "Ġmanag ing", + "Ġsy nd", + "ip edia", + "w alk", + "Ġprofession als", + "Ġguid ance", + "Ġunivers ities", + "Ġas semb", + "unt u", + "F inally", + "AS E", + "ĠAut o", + "ĠH ad", + "Ġann iversary", + "L D", + "ĠD ur", + "ĠUlt imate", + "ih ad", + "pro duct", + "Ġtrans it", + "Ġrest ore", + "Ġexpl aining", + "Ġass et", + "Ġtransfer red", + "Ġbur st", + "ap olis", + "ĠMag azine", + "ĠC ra", + "ĠB R", + "gg ed", + "ĠH E", + "M ich", + "b et", + "ĠL ady", + "yl um", + "erv es", + "Ġme ets", + "wh ite", + "L og", + "Ġcorrespond ing", + "Ġins isted", + "G G", + "Ġsurround ed", + "Ġt ens", + "Ġl ane", + "Ġco inc", + "h ome", + "Ġexist ed", + "ect ed", + "ĠDou ble", + "lam m", + "Ġske pt", + "ex p", + "Ġper ception", + "ie v", + "ĠBe ing", + "o ft", + "Ġadop t", + ". :", + "] ;", + "Wind ows", + "Ġsatell ite", + "AS H", + "Ġinf ant", + "d escription", + "ĠMe anwhile", + "c m", + "oc a", + "ĠT reat", + "act or", + "Ġtob acco", + "ĠN orm", + "em ption", + "Ġfl esh", + "Ġj e", + "o op", + "ĠHe aven", + "Ġbe ating", + "an im", + "Ġgather ing", + "Ġcult iv", + "G O", + "ab e", + "ĠJon athan", + "ĠSaf ety", + "Ġbad ly", + "pro t", + "Ġcho osing", + "Ġcontact ed", + "Ġqu it", + "Ġdist ur", + "Ġst ir", + "Ġto ken", + "D et", + "ĠP a", + "Ġfunction ality", + "00 3", + "s ome", + "Ġlimit ations", + "Ġmet h", + "b uild", + "con fig", + "N T", + "re ll", + "ble m", + "ĠM om", + "Ġveter ans", + "ĠH u", + "Ġtrend s", + "are r", + "ĠG iven", + "ĠCa ption", + "m ay", + "AS T", + "Ġwond ering", + "ĠCl ark", + "n ormal", + "Ġsepar ated", + "Ġdes p", + "st ic", + "b rew", + "Ġrel ating", + "ĠN ik", + "ĠF arm", + "Ġenthus i", + "g ood", + "d eb", + "Ġactiv ist", + "Ġm art", + "Ġexplos ion", + "ĠEconom ic", + "L ink", + "Ġins ight", + "Ġconven ient", + "Ġcounter part", + "su pport", + "ĠV irt", + "ag en", + "ĠTenn essee", + "ĠSim on", + "ĠA ward", + "OC K", + "ĠF igure", + "Ġoverse as", + "Ġpr ide", + "ĠC as", + "n ote", + "m g", + "C urrent", + "Ġdispl ays", + "cont ent", + "Ġtravel ing", + "Ġhosp itals", + "ĠFin ancial", + "ĠP ast", + "Ġdefend ant", + "Ġstream ing", + "m ble", + "ĠBer lin", + "uk i", + "Ġdist ribut", + "Ġant ib", + "Ġch ocolate", + "ĠCast le", + "Ġinter rupt", + "ĠR ow", + "Ġconvers ion", + "Ġbug s", + "ĠR ather", + "li est", + "L Y", + "ĠJe an", + "com mon", + "ak h", + "Ġ1 30", + "ot ton", + "ĠDe an", + "Ġam endment", + "Ġgame play", + "ĠWar ren", + "od a", + "Ġhigh lights", + "Ġir re", + "ĠNAT O", + "Ġball s", + "Ġdemand ing", + "U RE", + "ĠL uke", + "F igure", + "st op", + "on ia", + "z one", + "iz ers", + "ĠW R", + "Ġaward ed", + "Ġregul atory", + "ĠH art", + "ĠS N", + "pl ing", + "Ġs our", + "ĠP ixel", + "us ive", + "Ġf et", + "ĠS ent", + "Ġautom atic", + "Ġf er", + "vern ment", + "ĠKh an", + "T ON", + "f ather", + "Ġextraord inary", + "th rop", + "ĠP ython", + "ĠG PU", + "Ġsex ually", + "Ġdesk top", + "it ivity", + "ĠAnton io", + "Ġo rient", + "Ġe ars", + "ob by", + "ous es", + "vertis ements", + "Ġmanufacture rs", + "ic ient", + "min ute", + "Ġconv iction", + "Ġg arden", + "p ublic", + "Ġsatisf ied", + "f old", + "O K", + "Ġin hab", + "ĠTh ink", + "Ġprogram me", + "Ġst omach", + "Ġcoord in", + "Ġh oly", + "Ġth reshold", + "Ġr het", + "Ġser ial", + "Ġemploy ers", + "ĠEvery thing", + "ra h", + "Ġb other", + "Ġbr ands", + "Val ue", + "ĠT ed", + "ĠPlan et", + "Ġp ink", + "ĠFurther more", + "s a", + "P E", + "re ck", + "ĠUS D", + "ot te", + "Ġ& &", + "Ġland ed", + "g ets", + "Ġprodu cers", + "Ġhealth care", + "Ġdomin ant", + "Ġdest ro", + "Ġam 
ended", + "ch ron", + "Ġf its", + "ĠSy d", + "ĠAuthor ity", + "AT CH", + "Ġfight s", + "ĠL LC", + "Ġ-- -", + "ĠCor p", + "Ġtox ic", + "spe cific", + "ĠC orn", + "ĠChe l", + "Ġtele phone", + "ĠP ant", + "Ġmyster ious", + "aun ch", + "od ox", + "med ia", + "Ġwitness es", + "ag u", + "Ġquestion ed", + "ĠBre xit", + "ĠRem ember", + "ene z", + "Ġend orse", + "iat ric", + "ĠId ent", + "Ġridic ulous", + "1 10", + "Ġpr ayer", + "Ġscient ist", + "Ġ19 50", + "ĠA qu", + "Ġunder ground", + "ĠU FC", + "m are", + "ĠL ater", + "w ich", + "Ġsubsc rib", + "Ġhost s", + "Ġer r", + "Ġgr ants", + "ant om", + "Ġsum mon", + "ear ly", + "ĠC lear", + "ĠPr im", + "Ġsusp ension", + "Ġguarant eed", + "app er", + "Ġr ice", + "ĠSe an", + "ĠSh in", + "Ġrefere ndum", + "Ġfl ed", + "r ust", + "Ġ3 60", + "ter y", + "Ġsh ocked", + "B R", + "ĠO il", + "ĠAll ah", + "Ġpart ly", + "Ġign or", + "Ġtrans mission", + "Ġhom osexual", + "ivers al", + "Ġhop efully", + "ãĤ ¤", + "Ġless on", + "L eg", + "Ġ ..", + "Y et", + "t able", + "app ropri", + "re tt", + "Ġbo ards", + "Ġincor rect", + "Ġb acteria", + "ar u", + "am ac", + "Ġsn ap", + ".' \"", + "Ġpar ad", + "t em", + "he art", + "Ġav ailability", + "Ġw isdom", + "Ġ( +", + "Ġpri est", + "ĠÂł ĠÂł", + "O pen", + "Ġsp an", + "Ġparam eter", + "Ġconv ince", + "Ġ( %)", + "r ac", + "Ġf o", + "Ġsafe ly", + "Ġconver ted", + "ĠOlymp ic", + "Ġres erve", + "Ġhe aling", + "ĠM ine", + "M ax", + "Ġin herent", + "ĠGra ham", + "Ġinteg rated", + "D em", + "Ġpip eline", + "Ġapp lying", + "Ġem bed", + "ĠCharl ie", + "Ġc ave", + "200 8", + "Ġcons ensus", + "Ġre wards", + "P al", + "ĠHT ML", + "Ġpopular ity", + "look ing", + "ĠSw ord", + "ĠAr ts", + "' )", + "Ġelect ron", + "clus ions", + "Ġinteg rity", + "Ġexclus ively", + "Ġgr ace", + "Ġtort ure", + "Ġburn ed", + "tw o", + "Ġ18 0", + "P rodu", + "Ġent reprene", + "raph ics", + "Ġg ym", + "ric ane", + "ĠT am", + "Ġadministr ative", + "Ġmanufacture r", + "Ġ vel", + "ĠN i", + "Ġisol ated", + "ĠMedic ine", + "Ġback up", + "Ġpromot ing", + "Ġcommand er", + "Ġfle e", + "ĠRus sell", + "Ġforg otten", + "ĠMiss ouri", + "Ġres idence", + "m ons", + "Ġrese mb", + "Ġw and", + "Ġmeaning ful", + "P T", + "Ġb ol", + "Ġhe lic", + "Ġwealth y", + "Ġr ifle", + "str ong", + "row ing", + "pl an", + "as ury", + "âĢ¦ .", + "Ġexpand ing", + "ĠHam ilton", + "Ġrece ives", + "S I", + "eat ures", + "ĠAn im", + "RE E", + "P ut", + "Ġbrief ly", + "ri ve", + "Ġstim ul", + "Ġ`` (", + "Ġ __", + "Ġch ip", + "Ġha z", + "Ġpri ze", + "ĠTh ings", + "AC E", + "ul in", + "d ict", + "ok u", + "Ġassoci ate", + "ock ets", + "y outube", + "St ory", + "ateg ory", + "Ġm ild", + "ail ing", + "ĠY e", + "O rig", + "ĠK a", + "or ig", + "Ġpropag anda", + "Ġan onymous", + "Ġstrugg led", + "Ġout rage", + "AT ED", + "ĠBe ijing", + "r ary", + "Ġle ather", + "Ġworld s", + "Ġbroad er", + "12 5", + "id al", + "ĠBet ter", + "Ġt ear", + "E xt", + "Ġpropos als", + "Ġit er", + "ĠSqu ad", + "Ġvol unt", + "m i", + "D id", + "ĠP u", + "p in", + "Ġspeak ers", + "Ġb orders", + "Ġfig ured", + "= '", + "Ġsimultane ously", + "aed a", + "Ġcharg ing", + "Ġur ged", + "Ġcon j", + "25 6", + "ĠG ordon", + "mer ce", + "Ġdocument ary", + "Sh are", + "it ol", + "ON E", + "ĠG arden", + "h att", + "ĠThom pson", + "ane ous", + "ap ore", + "Ġt anks", + "Ġless ons", + "tr ack", + "Ġout standing", + "Ġvolunte ers", + "Ġsp ray", + "Ġmanag ers", + "l arge", + "Ġcamp s", + "Ġart ificial", + "ĠR u", + "Ġb ags", + "th al", + "Ġcompat ible", + "ĠBl ade", + "Ġf ed", + "Ġarg ues", + "F I", + "Ġunf air", + "Ġcor n", + "Ġoff set", + "Ġdirect 
ions", + "Ġdisappoint ed", + "ĠCon vention", + "Ġview ing", + "M E", + "oc ity", + "Ġtown s", + "Ġlay ers", + "Ġro lled", + "Ġjump ed", + "Ġatt ribute", + "Ġun necess", + "inc oln", + "Ġsupp ose", + "ĠNet her", + "ch a", + "Ġbur ied", + "Ġsix th", + "B en", + "ress ing", + "OU R", + "Ġw ound", + "Ġcy cl", + "Ġmechan isms", + "Ġcongress ional", + "ĠE lement", + "Ġagre ements", + "Ġdec or", + "Ġclos est", + "ĠM it", + "Go ogle", + "} }", + "Ġm ixture", + "Ġflu id", + "S ign", + "ĠSch olar", + "Ġp ist", + "ask et", + "ab ling", + "Ġrac ing", + "he ro", + "ri el", + "ass y", + "Ġche aper", + "b en", + "Ġvert ical", + "amac are", + "ĠRead ing", + "g ments", + "Ġhelic op", + "Ġsacr ifice", + "ay a", + "p aren", + "V A", + "ĠL es", + "ĠStud io", + "Ġviol ations", + "ĠAn na", + "ac er", + "é ¾", + "ĠR at", + "ĠBe ck", + "ĠD ick", + "ĠA CT", + "Ġcomp osition", + "Ġtext ure", + "ĠO wn", + "Ġsmart phone", + "ĠN A", + "Ġfor b", + "im port", + "Ġdef ending", + "il st", + "re r", + "Ġo h", + "ĠJere my", + "Ġbank ing", + "cept ions", + "Ġrespect ive", + "/ .", + "Ġdr inks", + "ĠW i", + "Ġb ands", + "ĠL iverpool", + "Ġg rip", + "ĠB uy", + "Ġopen ly", + "Ġreview ed", + "per t", + "Ġver ify", + "ĠCo le", + "ĠW ales", + "M O", + "Ġun pre", + "Ġshel ter", + "ĠIm perial", + "Ġgu i", + "ĠD ak", + "Ġsuggest ions", + "Ġexplicit ly", + "Ġsl ave", + "Ġblock chain", + "Ġcompet ing", + "Ġprom ising", + "S ON", + "Ġsoc cer", + "Ġconst itution", + "4 29", + "Ġdist ract", + "ĠU ser", + "es ides", + "ĠMet hod", + "ĠTok yo", + "Ġaccompan ied", + "Cl ient", + "s ur", + "al og", + "Ġident ification", + "Ġinv asion", + "as ma", + "Ġindust ries", + "pp ers", + "Ġsub tle", + "ĠUn it", + "n atural", + "Ġsurv ived", + "Ġfl aw", + "ĺ ħ", + "ĠH oll", + "Ġdef icit", + "Ġtut orial", + "ĠCh ance", + "Ġarg uing", + "Ġcontem porary", + "Ġinteg ration", + "for ward", + "Ġt um", + "it is", + "Ġh iding", + "ĠD omin", + "ĠT an", + "ĠB uilding", + "ĠV in", + "Ġspokes person", + "ĠNot es", + "Ġemer ging", + "Ġprepar ation", + "Ġpro st", + "Ġsuspect s", + "Ġaut onom", + "D escription", + "Ġdeal t", + "ĠP ear", + "Ġstead y", + "Ġdecre ased", + "Ġso vere", + "ĠCl in", + "Ġgrad ually", + "ors es", + "ĠW AR", + "S erv", + "ãĤ ¢", + "h r", + "Ġd irty", + "ĠB arn", + "ĠB C", + "Ġd il", + "Ġcal endar", + "Ġcompl iance", + "Ġch amber", + "b b", + "Ġpass enger", + "ate ful", + "ĠT itle", + "ĠSyd ney", + "ĠG ot", + "Ġdark ness", + "Ġdef ect", + "Ġpack ed", + "ass ion", + "Ġgod s", + "Ġh arsh", + "IC K", + "le ans", + "Ġalgorith m", + "Ġoxy gen", + "Ġvis its", + "Ġbl ade", + "Ġkil omet", + "ĠKent ucky", + "Ġkill er", + "P ack", + "enn y", + "Ġdiv ine", + "Ġnom ination", + "be ing", + "Ġeng ines", + "Ġc ats", + "Ġbuff er", + "ĠPh ill", + "Ġtra ff", + "AG E", + "Ġtong ue", + "Ġrad iation", + "ere r", + "m em", + "ĠExpl icit", + "é¾ į", + "Ġcou ples", + "Ġphys ics", + "ĠMc K", + "Ġpolit ically", + "aw ks", + "ĠBl oom", + "Ġwor ship", + "e ger", + "ut er", + "ĠF O", + "Ġmat hemat", + "Ġsent enced", + "Ġdis k", + "ĠM arg", + "Ġ/ *", + "P I", + "Ġoption al", + "Ġbab ies", + "Ġse eds", + "ĠScott ish", + "Ġth y", + "] ]", + "ĠHit ler", + "P H", + "ng th", + "Ġrec overed", + "ing e", + "Ġpow der", + "Ġl ips", + "Ġdesign er", + "Ġdis orders", + "Ġcour age", + "Ġch aos", + "\" },{\"", + "Ġcar rier", + "b ably", + "H igh", + "ĠR T", + "es ity", + "l en", + "Ġrout es", + "u ating", + "F il", + "N OT", + "w all", + "s burgh", + "Ġeng aging", + "ĠJava Script", + "ore r", + "li hood", + "Ġun ions", + "ĠF ederation", + "ĠTes la", + "Ġcomple tion", + "ĠT a", + "Ġprivile 
ge", + "ĠOr ange", + "Ġne ur", + "paren cy", + "Ġb ones", + "Ġtit led", + "Ġprosecut ors", + "ĠM E", + "Ġengine er", + "ĠUn iverse", + "ĠH ig", + "n ie", + "o ard", + "Ġheart s", + "ĠG re", + "uss ion", + "Ġmin istry", + "Ġpen et", + "ĠN ut", + "ĠO w", + "ĠX P", + "in stein", + "Ġbul k", + "S ystem", + "ic ism", + "ĠMarket able", + "Ġpre val", + "Ġpost er", + "Ġatt ending", + "ur able", + "Ġlicens ed", + "ĠG h", + "et ry", + "ĠTrad able", + "Ġbl ast", + "à ¤", + "ĠTit an", + "ell ed", + "d ie", + "H ave", + "ĠFl ame", + "Ġprof ound", + "Ġparticip ating", + "Ġan ime", + "ĠE ss", + "Ġspec ify", + "Ġregard ed", + "ĠSpe ll", + "Ġs ons", + "own ed", + "Ġm erc", + "Ġexper imental", + "land o", + "h s", + "ĠDun geon", + "in os", + "Ġcomp ly", + "ĠSystem s", + "ar th", + "Ġse ized", + "l ocal", + "ĠGirl s", + "ud o", + "on ed", + "ĠF le", + "Ġconstruct ed", + "Ġhost ed", + "Ġsc ared", + "act ic", + "ĠIs lands", + "ĠM ORE", + "Ġbl ess", + "Ġblock ing", + "Ġch ips", + "Ġev ac", + "P s", + "Ġcorpor ation", + "Ġo x", + "Ġlight ing", + "Ġneighb ors", + "ĠU b", + "ar o", + "Ġbe ef", + "ĠU ber", + "F acebook", + "ar med", + "it ate", + "ĠR ating", + "ĠQu ick", + "Ġoccup ied", + "Ġaim s", + "ĠAdd itionally", + "ĠInt erest", + "Ġdram atically", + "Ġhe al", + "Ġpain ting", + "Ġengine ers", + "M M", + "ĠM ust", + "Ġquant ity", + "P aul", + "Ġearn ings", + "ĠPost s", + "st ra", + "ãĥ¼ ãĥ", + "Ġst ance", + "Ġdro pping", + "sc ript", + "Ġd ressed", + "M ake", + "Ġjust ify", + "ĠL td", + "Ġprompt ed", + "Ġscr ut", + "Ġspeed s", + "ĠGi ants", + "om er", + "ĠEd itor", + "Ġdescrib ing", + "ĠL ie", + "ment ed", + "Ġnow here", + "oc aly", + "Ġinst ruction", + "fort able", + "Ġent ities", + "Ġc m", + "ĠN atural", + "Ġinqu iry", + "Ġpress ed", + "iz ont", + "for ced", + "Ġra ises", + "ĠNet flix", + "ĠS ide", + "Ġout er", + "Ġamong st", + "im s", + "ows ki", + "Ġclim b", + "ne ver", + "Ġcomb ine", + "d ing", + "Ġcomp r", + "Ġsignific ance", + "Ġremem bered", + "ĠNev ada", + "ĠT el", + "ĠSc ar", + "ĠWar riors", + "ĠJ ane", + "Ġcou p", + "b as", + "Ġtermin al", + ", -", + "O H", + "Ġt ension", + "Ġw ings", + "ĠMy ster", + "�� ��", + "ĠUn like", + "val id", + "viron ments", + "ĠAl i", + "Ġn aked", + "book s", + "ĠM un", + "ĠG ulf", + "Ġd ensity", + "Ġdim in", + "Ġdesper ate", + "Ġpres idency", + "Ġ198 6", + "h y", + "IN D", + "Ġun lock", + "im ens", + "Ġhand led", + "ĠE b", + "Ġdisapp eared", + "Ġgen re", + "Ġ198 8", + "Ġdetermin ation", + "St ream", + "ik o", + "ap ters", + "Ġacknow ledge", + "J an", + "Ġcapital ism", + "P at", + "Ġ20 20", + "Ġpain ful", + "Ġcur ve", + "Ġbom bs", + "st orm", + "ĠMet al", + "en cer", + "ĠF ig", + "ĠA aron", + "anc hes", + "Ġins piration", + "Ġexha ust", + "t ains", + "ash i", + "Ġdesc ript", + "Ġr itual", + "ĠChel sea", + "Ġpromot ion", + "ĠH ung", + "ĠW ard", + "iv a", + "ĠE T", + "Ġto ss", + "all ow", + "ĠFranc is", + "D ep", + "Ġhapp iness", + "ĠGl ass", + "Ġbet a", + "Ġstreng then", + "N E", + "o a", + "Ġbutt ons", + "ĠMur ray", + "Ġkick ed", + "Qu est", + "ĠT alk", + "ĠS everal", + "ĠZ ero", + "Ġdr one", + "ul k", + "Ġc am", + "ĠM obile", + "Ġprevent ing", + "Ġret ro", + "ĠA x", + "Ġcru el", + "Ġflo at", + ". ),", + "Ġfil ing", + "ĠGr ant", + "ĠB or", + "Ġr ib", + "Ġchampions hip", + "ĠM erc", + "Ġsty les", + "Ġc ake", + "Ġbuild s", + "ĠS elf", + "io x", + "Ġep ic", + "oy d", + "B el", + "ĠSt ew", + ". 
(", + "ah u", + "ĠBe yond", + "Ġout s", + "Ġsol o", + "ĠT ree", + "Ġpres erve", + "Ġt ub", + "AR E", + "ro c", + "ĠIm pro", + "ĠW right", + "Ġbu nd", + "Ġtr aged", + "Ġoccas ional", + "b ian", + "Sec ond", + "r ons", + "Ġinter actions", + "form ed", + "s ing", + "Ġown s", + "Ġh ockey", + "Gener al", + "Ġlog ical", + "Ġexp end", + "Ġesc al", + "ĠGr iff", + "ĠC rown", + "ĠRes erve", + "Ġsto pping", + "Ġexc use", + "sec ond", + "Ġoper ated", + "Ġre aches", + "ĠMal ays", + "Ġpoll ution", + "ĠBrook lyn", + "Ġde lete", + "Ġhas h", + "Bl ock", + "ah a", + "âĢ ³", + "Ġsh orter", + "p iece", + "> >>", + "ĠM ormon", + "t or", + "Ġpartic les", + "ĠB art", + "ry ption", + "Ġad min", + "Ġsqu ee", + "VID IA", + "Ġcreat or", + "iam eter", + "ic ular", + "N BC", + "Ġgrab bed", + "Ġn odd", + "Ġr ated", + "Ġrot ation", + "Ġgr asp", + "Ġexcess ive", + "ĠE C", + "ĠWh it", + "Ġinvent ory", + "ault s", + "ĠF B", + "Ġe cosystem", + "Ġbill ions", + "Ġvent ure", + "n amed", + "Ġdef ender", + "out e", + "Inst ead", + "ir able", + "W ar", + "Ġassum ption", + "Ġb ite", + "Ġearth qu", + "t ail", + "sp ace", + "Ġgif ts", + "boy s", + "Ġinev itable", + "Ġstruct ural", + "Ġbenef icial", + "Ġcompe lling", + "h ole", + "erv ation", + "Ġco at", + "o j", + "inc arn", + "ĠY ears", + "Ġdetermin ing", + "Ġrhet oric", + "Ġbound aries", + "Ġwh ites", + "A nt", + "add y", + ") -", + "ra ham", + "eter min", + "Ġhar vest", + "ĠCon c", + "Ġlapt op", + "ĠM atch", + "Ġenjoy ing", + "cc a", + "oll ar", + "Ġtri ps", + "Ġadd iction", + "ĠS ak", + "Ġpow ered", + "Ġc ous", + "ĠRuss ians", + "ie re", + "Ġret rie", + "qu ality", + "Ġdiff er", + "Ġking dom", + "ĠL aur", + "ĠCap itol", + "Ġcon clusions", + "ĠAl tern", + "ĠN av", + "Ġtrans parent", + "B ER", + "G roup", + "ĠCom plete", + "Ġinf er", + "Ġint rig", + "Ġins ane", + "R O", + "oph ob", + "is en", + "qu al", + "Mich ael", + "Ġm useum", + "ĠP ope", + "Ġres et", + "r ative", + "f ive", + "Ġagg reg", + "itte es", + "osit ory", + "Ġcar b", + "ĠRec ord", + "Ġdec ides", + "ĠF ix", + "Ġexcept ions", + "ĠCommission er", + "un s", + "ĠEnvironment al", + "Ġlegend ary", + "ist ence", + "Ġtun nel", + "k m", + "Ġins ult", + "Ġt roll", + "Ġsh ake", + "Ġdet ention", + "qu es", + "ĠCh rome", + "ĠF iles", + "Ġsub t", + "Ġprospect s", + "Ġpro l", + "re nder", + "pro of", + "Ġperform ances", + "St r", + "Ġh ref", + "ern ame", + "Ġachieve ment", + "Ġf ut", + "F ull", + "ĠLe ban", + "go ogle", + "ãĥ Ī", + "amp a", + "May be", + "Ġproject ed", + "ĠE mb", + "Ġcol leg", + "Ġa wards", + "Ġâ Ķ", + "G old", + "ĠBl ake", + "ĠR aj", + "if ting", + "Ġp ending", + "Ġinst inct", + "Ġdevelop ments", + "Con nect", + "ĠM and", + "ĠW ITH", + "ĠPhilipp ines", + "prof ile", + "Ġalt ogether", + "ĠB und", + "ĠT D", + "oo oo", + "amp ed", + "ip h", + "Ġste am", + "Ġold est", + "Ġdet ection", + "ul pt", + "Ġ ç", + "ĠWay ne", + "200 6", + "f a", + "Ġcir cles", + "ĠF u", + "Ġdon ors", + "appropri ate", + "ĠDak ota", + "j amin", + "Ġmotiv ated", + "Ġpurch ases", + "ĠLouis iana", + "ĠS pl", + "Ġgl obe", + "Ġ10 5", + "z ip", + "c all", + "Ġdepart ments", + "Ġsustain able", + "10 5", + "ĠO P", + "if iers", + "Ġprevent ed", + "Ġinc omp", + "ĠComm ander", + "Ġdom inated", + "Ġ »", + "Ġinvest ed", + "Ġcomplex ity", + "Ġin cl", + "Ġens uring", + "Ġreal m", + "yn c", + "ĠInd ependent", + "r ained", + "ĠJ en", + "ĠFl ight", + "Ġat he", + "Ġspec ulation", + "ĠT E", + "oc ate", + "t ic", + "Ġpl aint", + "her ry", + "Ġto y", + "Ġ1 11", + "Ġpl ates", + "st atus", + "ĠIs a", + "Ġdev oted", + "C op", + "ĠE S", + "25 5", + "ur rency", + "M ain", 
+ "Ġsl aves", + "Ġpe pper", + "Ġqu otes", + "Ġce iling", + "ĠF ish", + "Ġtrans formation", + "Ġfra ction", + "Ġadvant ages", + "Ġto ile", + "Ġstun ning", + "Ġmo ist", + "bre aking", + "s i", + "ĠL ocation", + "ĠMed ium", + "Ġtext s", + "Ġu gly", + "Ġb io", + ". âĢĶ", + "ĠB ased", + "Ġtr ains", + "ĠW ing", + "ĠAn cient", + "ĠRec ords", + "ĠH ope", + "Spe cial", + "ades h", + "ob i", + "[ /", + "Ġtempor arily", + "V er", + "h u", + "os er", + "Ġover night", + "Ġm amm", + "ĠTre asury", + "ĠV enezuel", + "ĠMeg a", + "Ġt ar", + "Ġexpect s", + "bl ack", + "or ph", + "\\\\ \\\\", + "Ġaccept ance", + "Ġrad ar", + "s is", + "Ġjun ior", + "Ġfram es", + "Ġobserv ation", + "ac ies", + "P ower", + "ĠAdv anced", + "M ag", + "olog ically", + "ĠMe chan", + "Ġsent ences", + "Ġanaly sts", + "augh ters", + "force ment", + "Ġv ague", + "Ġcl ause", + "Ġdirect ors", + "Ġeval uate", + "Ġcabin et", + "M att", + "ĠClass ic", + "A ng", + "Ġcl er", + "ĠB uck", + "Ġresear cher", + "Ġ16 0", + "Ġpoor ly", + "Ġexperien cing", + "ĠP ed", + "ĠMan hattan", + "Ġfre ed", + "Ġthem es", + "ad vant", + "Ġn in", + "Ġpra ise", + "10 4", + "ĠLib ya", + "b est", + "Ġtrust ed", + "Ġce ase", + "Ġd ign", + "D irect", + "Ġbomb ing", + "Ġm igration", + "ĠSci ences", + "Ġmunicip al", + "ĠA verage", + "Ġgl ory", + "Ġreve aling", + "Ġare na", + "Ġuncertain ty", + "Ġbattle field", + "ia o", + "G od", + "Ġc inem", + "ra pe", + "el le", + "ap ons", + "Ġlist ing", + "Ġwa ited", + "Ġsp otted", + "ke ley", + "ĠAud io", + "e or", + "ard ing", + "idd ing", + "ig ma", + "ĠN eg", + "Ġl one", + "Ġ ----", + "ex e", + "d eg", + "Ġtrans f", + "Ġwas h", + "Ġsl avery", + "Ġexpl oring", + "ĠW W", + "ats on", + "Ġen cl", + "l ies", + "ĠC reek", + "Ġwood en", + "Man ager", + "ĠBr and", + "um my", + "ĠAr thur", + "Ġbureau cr", + "Ġbl end", + "ar ians", + "F urther", + "Ġsupposed ly", + "Ġwind s", + "Ġ19 79", + "Ġgrav ity", + "Ġanalys es", + "ĠTra vel", + "ĠV eter", + "Ġd umb", + "Ġaltern ate", + "g al", + "Ġconsum ed", + "Ġeffect iveness", + ".' '", + "Ġpath s", + "ond a", + "L A", + "ĠStr ong", + "Ġen ables", + "Ġesc aped", + "Ġ\" \"", + "Ġ1 12", + "Ġ198 3", + "Ġsm iled", + "Ġtend ency", + "F ire", + "Ġp ars", + "ĠR oc", + "Ġl ake", + "Ġf itness", + "ĠA th", + "ĠH orn", + "Ġh ier", + "Ġimp ose", + "m other", + "Ġp ension", + "ic ut", + "bor ne", + "ic iary", + ". 
_", + "ĠS U", + "Ġpol ar", + "is y", + "eng u", + "itial ized", + "AT A", + "w rite", + "Ġexerc ises", + "ĠD iamond", + "ot ypes", + "Ġharm ful", + "on z", + "Ġprint ing", + "st ory", + "Ġexpert ise", + "ĠG er", + "Ġtraged y", + "ĠF ly", + "Ġd ivid", + "amp ire", + "st ock", + "M em", + "Ġre ign", + "Ġun ve", + "Ġam end", + "ĠProp het", + "Ġmut ual", + "ĠF ac", + "Ġrepl acing", + "H ar", + "ĠCirc uit", + "Ġthro at", + "ĠSh ot", + "Ġbatter ies", + "Ġto ll", + "Ġaddress ing", + "ĠMedic aid", + "Ġp upp", + "ĠN ar", + "ol k", + "Ġequ ity", + "M R", + "ĠHis pan", + "ĠL arge", + "m id", + "D ev", + "Ġexp ed", + "Ġdem o", + "ĠMarsh all", + "erg us", + "Ġf iber", + "Ġdiv orce", + "ĠCre ate", + "Ġsl ower", + "ĠPark er", + "ĠStud ent", + "ĠTr aining", + "Ret urn", + "ĠT ru", + "Ġc ub", + "ĠRe ached", + "Ġpan ic", + "Ġqu arters", + "Ġre ct", + "Ġtreat ing", + "Ġr ats", + "ĠChristian ity", + "ol er", + "Ġsac red", + "Ġdecl are", + "ul ative", + "et ing", + "Ġdeliver ing", + "est one", + "Ġt el", + "ĠL arry", + "Ġmet a", + "ac cept", + "art z", + "ĠRog er", + "hand ed", + "Ġhead er", + "Ġtra pped", + "ĠCent ury", + "Ġkn ocked", + "ĠOx ford", + "Ġsurviv ors", + "b ot", + "Ġdemon stration", + "Ġd irt", + "Ġass ists", + "OM E", + "ĠD raft", + "ortun ate", + "fol io", + "pe red", + "ust ers", + "g t", + "ĠL ock", + "Ġjud icial", + "ver ted", + "Ġsec ured", + "out ing", + "ĠBook s", + "Ġhost ing", + "Ġlif ted", + "l ength", + "Ġj er", + "Ġwhe els", + "ĠR ange", + "umbn ails", + "Ġdiagn osis", + "te ch", + "ĠStew art", + "ĠP ract", + "Ġnation wide", + "Ġde ar", + "Ġoblig ations", + "Ġgrow s", + "Ġmand atory", + "Ġsusp icious", + "! '", + "A pr", + "G reat", + "Ġmort gage", + "Ġprosecut or", + "Ġeditor ial", + "ĠK r", + "Ġprocess ed", + "ung le", + "Ġflex ibility", + "Ear lier", + "ĠC art", + "ĠS ug", + "Ġfoc uses", + "Ġstart up", + "Ġbre ach", + "ĠT ob", + "cy cle", + "ãĢ Į", + "ro se", + "Ġb izarre", + "ãĢ į", + "Ġveget ables", + "$ $", + "Ġret reat", + "osh i", + "ĠSh op", + "ĠG round", + "ĠSt op", + "ĠHawai i", + "ĠA y", + "Per haps", + "ĠBe aut", + "uff er", + "enn a", + "Ġproduct ivity", + "F ixed", + "cont rol", + "Ġabs ent", + "ĠCamp aign", + "G reen", + "Ġident ifying", + "Ġreg ret", + "Ġpromot ed", + "ĠSe ven", + "Ġer u", + "ne ath", + "aug hed", + "ĠP in", + "ĠL iving", + "C ost", + "om atic", + "me ga", + "ĠN ig", + "oc y", + "Ġin box", + "Ġem pire", + "Ġhor izont", + "Ġbr anches", + "Ġmet aph", + "Act ive", + "ed i", + "ĠFil m", + "ĠS omething", + "Ġmod s", + "inc ial", + "ĠOrig inal", + "G en", + "Ġspir its", + "Ġear ning", + "H ist", + "Ġr iders", + "Ġsacr ific", + "M T", + "ĠV A", + "ĠS alt", + "Ġoccup ation", + "ĠM i", + "Ġdis g", + "lic t", + "Ġn it", + "Ġn odes", + "e em", + "ĠP ier", + "Ġhat red", + "ps y", + "ãĥ ī", + "Ġthe ater", + "Ġsophistic ated", + "Ġdef ended", + "Ġbes ides", + "Ġthorough ly", + "ĠMedic are", + "Ġbl amed", + "arent ly", + "Ġcry ing", + "F OR", + "pri v", + "Ġsing ing", + "ĠI l", + "Ġc ute", + "o ided", + "olit ical", + "ĠNe uro", + "å ¤", + "Ġdon ation", + "ĠEag les", + "ĠG ive", + "T om", + "Ġsubstant ially", + "ĠLic ense", + "ĠJ a", + "Ġg rey", + "ĠAn imal", + "ĠE R", + "ĠU nd", + "Ġke en", + "Ġconclud e", + "ĠMississ ippi", + "Eng ine", + "ĠStud ios", + "P ress", + "o vers", + "ll ers", + "Ġ3 50", + "ĠR angers", + "Ġr ou", + "ert o", + "E p", + "iss a", + "iv an", + "Ġse al", + "ĠReg ist", + "dis play", + "Ġwe aken", + "u um", + "ĠComm ons", + "ĠS ay", + "Ġcult ures", + "Ġl aughed", + "Ġsl ip", + "Ġtreat ments", + "iz able", + "m art", + "ĠR ice", + "Ġbe ast", + 
"Ġob esity", + "ĠLa ure", + "ig a", + "Wh ich", + "hold er", + "Ġelder ly", + "Ġp ays", + "Ġcompl ained", + "Ġc rop", + "Ġpro c", + "Ġexplos ive", + "ĠF an", + "ĠAr senal", + "A uthor", + "ef ul", + "Ġme als", + "Ġ( -", + "id ays", + "Ġimag ination", + "Ġann ually", + "Ġm s", + "as ures", + "H ead", + "ik h", + "m atic", + "Ġboy friend", + "ĠCom puter", + "Ġb ump", + "Ġsur ge", + "ĠCra ig", + "ĠKir k", + "D el", + "medi ate", + "Ġscen arios", + "ĠM ut", + "ĠSt ream", + "Ġcompet itors", + "Ù Ħ", + "ĠStan ford", + "ĠRes ources", + "az ed", + "b age", + "Ġorgan is", + "ĠRe lease", + "Ġsepar ately", + "Ġha bits", + "Ġmeasure ments", + "ĠCl ose", + "Ġaccomp any", + "Ġg ly", + "Ġt ang", + "ĠR ou", + "Ġplug in", + "Ġcon vey", + "ĠChall enge", + "oot s", + "j an", + "Ġcur s", + "ĠRel ations", + "ke eper", + "Ġapproach ing", + "p ing", + "Spe aking", + "Ġarrang ement", + "ĠV I", + "are ttes", + "Ġaffect ing", + "Ġperm its", + "b ecause", + "Ġu seless", + "ĠH us", + "!! !!", + "Ġdestro ying", + "Un fortunately", + "Ġfasc inating", + "S em", + "Ġelect oral", + "Ġtrans parency", + "ĠCh aos", + "Ġvolunte er", + "Ġstatist ical", + "Ġactiv ated", + "ro x", + "We b", + "H E", + "ĠHamp shire", + "is ive", + "M ap", + "Ġtr ash", + "ĠLaw rence", + "st ick", + "C r", + "Ġr ings", + "EX T", + "Ġoper ational", + "op es", + "D oes", + "ĠEv ans", + "Ġwitness ed", + "P ort", + "Ġlaunch ing", + "ec onom", + "w ear", + "ĠPart icip", + "um m", + "cul es", + "ĠR AM", + "ĠT un", + "Ġass ured", + "Ġb inary", + "Ġbet ray", + "Ġexpl oration", + "ĠF el", + "Ġad mission", + "it ated", + "S y", + "Ġav oided", + "ĠSim ulator", + "Ġcelebr ated", + "ĠElect ric", + "¥ ŀ", + "Ġcl uster", + "itzer land", + "he alth", + "L ine", + "ĠN ash", + "at on", + "Ġsp are", + "Ġenter prise", + "ĠD IS", + "clud es", + "Ġfl ights", + "Ġreg ards", + "Ġà Ĺ", + "h alf", + "Ġtr ucks", + "Ġcontact s", + "Ġunc ons", + "ĠCl imate", + "Ġimm ense", + "N EW", + "oc c", + "ect ive", + "Ġemb od", + "Ġpat rol", + "Ġbes ide", + "Ġv iable", + "Ġcre ep", + "Ġtrig gered", + "ver ning", + "Ġcompar able", + "q l", + "Ġg aining", + "ass es", + "Ġ( );", + "ĠG rey", + "ĠM LS", + "s ized", + "Ġpros per", + "\" ?", + "Ġpoll ing", + "Ġsh ar", + "ĠR C", + "Ġfire arm", + "or ient", + "Ġf ence", + "Ġvari ations", + "g iving", + "ĠP i", + "osp el", + "Ġpled ge", + "Ġc ure", + "Ġsp y", + "Ġviol ated", + "Ġr ushed", + "Ġstro ke", + "ĠBl og", + "sel s", + "ĠE c", + ",' '", + "Ġp ale", + "ĠColl ins", + "ter ror", + "ĠCanad ians", + "Ġt une", + "Ġlabor atory", + "Ġn ons", + "t arian", + "Ġdis ability", + "ĠG am", + "Ġsing er", + "al g", + "ĠSen ior", + "Ġtrad ed", + "ĠWar rior", + "Ġinf ring", + "ĠFrank lin", + "Ġstr ain", + "ĠSwed ish", + "Ġsevent h", + "ĠB enn", + "ĠT ell", + "Ġsynd rome", + "Ġwond ered", + "id en", + "++ ++", + "ig o", + "Ġpur ple", + "Ġjournal ism", + "Ġreb el", + "Ġf u", + "bl og", + "Ġinv ite", + "ren cies", + "ĠCont act", + "Is rael", + "ĠCont ent", + "Ġche er", + "Ġbed room", + "ĠEngine ering", + "ĠQue ens", + "Ġd well", + "ĠPlay Station", + "ĠD im", + "ĠCol on", + "l r", + "Ġoper ates", + "Ġmotiv ation", + "US A", + "ast ered", + "C ore", + "ĠTr uth", + "ol o", + "OS E", + "ĠMem ory", + "Ġpred ec", + "Ġan arch", + "Ġ19 20", + "ĠY am", + "à ¨", + "b id", + "Ġgr ateful", + "Ġexc itement", + "Ġtre asure", + "Ġlong est", + "ct ive", + "Ġdes erves", + "Ġreserv es", + "Ġcop s", + "ĠOtt awa", + "ĠEgypt ian", + "ank ed", + "Ġart if", + "Ġhypot hesis", + ": /", + "Ġpurch asing", + "Ġlove ly", + "H P", + "Ġdiv ide", + "Ġstrict ly", + "Ġquestion ing", + "Ġtaxp 
ayers", + "ĠJ oy", + "Ġroll s", + "ĠHe avy", + "Ġp orts", + "Ġmag netic", + "Ġinf lamm", + "Ġbr ush", + "t ics", + "â ĪĴ", + "Ġbott les", + "pp y", + "Ġp add", + "ãĤ ¯", + "m illion", + "Ġdevast ating", + "Ġcomp iled", + "Ġmed ication", + "Ġtw elve", + "ĠPer ry", + "Sp ace", + "im b", + "y our", + "Ġle aked", + "ĠT ar", + "Ġun ity", + "Ġinfect ed", + "Ġtravel ed", + "ID E", + "ĠMc Donald", + "t xt", + "ĠPr inc", + "Ġinter ven", + "ĠTai wan", + "ĠP ow", + "Ġbe aring", + "ĠTh read", + "Ġz ones", + "iz ards", + "un ks", + "Ch apter", + "ll or", + "Ġ ·", + "Ġw ounds", + "Ġdisc retion", + "Ġsucceed ed", + "ik ing", + "Ġicon ic", + "C all", + "Ġscreen ing", + "ĠM is", + "ict s", + "Ġmin isters", + "Ġsepar ation", + "Pl ayer", + "Ġb ip", + "Ġbel oved", + "Ġcount ing", + "ĠE ye", + "ar ound", + "ing ing", + "Ġtable t", + "Ġoff ence", + "in ance", + "h ave", + "ĠInf o", + "ĠNin ja", + "Ġprotect ive", + "ĠC ass", + "M ac", + "ĠQual ity", + "N orth", + "Ġ ic", + "ĠCub a", + "ĠChron icle", + "ĠPro perty", + "Ġfast est", + "ot os", + "ĠG erm", + "OW N", + "Ġbo om", + "ĠStan ley", + "ergus on", + "Ġcle ver", + "Ġent ers", + "m ode", + "ter ior", + "ĠS ens", + "Ġlin ear", + "AR K", + "Ġcomp aring", + "Ġpure ly", + "Ġsaf er", + "ĠPot ter", + "Ġc ups", + "R T", + "Ġgl uc", + "Ġatt ributed", + "Ġdu pl", + "ĠP ap", + "Ġprec ious", + "Ġp a", + "iction ary", + "ĠT ig", + "ĠTo o", + "ol utions", + "st an", + "Ġrob ots", + "Ġlob b", + "Ġstat ute", + "Ġprevent ion", + "w estern", + "16 0", + "ĠAct ive", + "ĠMar ia", + "h al", + "N one", + "ell ar", + "ĠK B", + "ĠPart ners", + "ĠSing le", + "ĠFollow ing", + "ang o", + "ac ious", + "Ġth ou", + "Ġk g", + "Ġinflu ential", + "ĠFriend s", + "S ur", + "ain ted", + "Ġfor ums", + "Ġst arter", + "Ġcitizens hip", + "ĠE lection", + "on ge", + "ot ation", + "os ph", + ";; ;;", + "ut ical", + "p ur", + "ere n", + "Ġaccus ations", + "bit ious", + "ab bit", + "ĠOr d", + "Post ed", + "ir k", + "Ġsens itivity", + "ic he", + "ĠAm y", + "ĠF ab", + "Ġsum mit", + "Ġped est", + "Ġrub ber", + "Ġagric ultural", + "Ġcan cel", + "A E", + "Ġin aug", + "Ġcont am", + "Ġfirm ly", + "i w", + "st age", + "ĠK an", + "Ġt ier", + "Ġinv ention", + "Ġtransl ated", + "ĠR ules", + "B ox", + "Tw itter", + "ID S", + "Ġp izza", + "Ġdeb ug", + "ĠD rop", + "v s", + "Ġh orses", + "b ig", + "Ġb oring", + "Ġh ood", + "ĠMcC ain", + "at ched", + "ĠBro s", + "Ġsk ip", + "Ġess ay", + "st at", + "ĠLeg ends", + "Ġam munition", + "au c", + "Ġshoot er", + "Ġun h", + "Ġsuppl ied", + "Ġgener ic", + "ĠS K", + "ib an", + "yr ics", + "Ġ25 5", + "Ġclim bing", + "Form er", + "Ġfl ip", + "Ġjump ing", + "Ġfrust ration", + "ĠTer ry", + "Ġneighborhood s", + "Ġmed ian", + "be an", + "Ġbr ains", + "Follow ing", + "Ġsh aped", + "Ġdraw s", + "Ġal tered", + "J ack", + "Ġrecip es", + "Ġsk illed", + "we alth", + "ach i", + "e lection", + "Ġbehavi ors", + "de als", + "ĠU ntil", + "F e", + "Ġdecl aration", + "mar ks", + "ĠBet ween", + "cel ona", + "Ġres on", + "Ġbub ble", + "Am ong", + "Ġim perial", + "G S", + "Ġfemin ist", + "200 5", + "ĠK yle", + "Ġaccount ing", + "ĠTe le", + "ĠT yr", + "Ġconnect ing", + "Ġre hab", + "ĠP red", + "s im", + "Ġmeant ime", + "Ġphys ician", + "M W", + "ĠCamp bell", + "ĠBr andon", + "Ġcontribut ing", + "ĠR ule", + "ĠWe ight", + "ĠN ap", + "Ġinter active", + "Ġv ag", + "Ġhel met", + "ĠCom b", + "f our", + "Ġsh ipped", + "Ġcomple ting", + "ĠP D", + "PD ATE", + "Ġspread ing", + "Ġsc ary", + "erv ing", + "ĠG as", + "Ġfr ank", + "s chool", + "Ġrom antic", + "Ġstab il", + "R ob", + "Ġaccur ately", + "Ġac ute", + 
"ĠH ann", + "Ġsymbol s", + "Ġcivil ization", + "ĠA W", + "Ġlight ning", + "Ġcons iders", + "Ġven ue", + "Ġ ×", + "Ġo ven", + "ĠS F", + "h is", + "Ġn u", + "ĠLear n", + "Ġpe oples", + "Ġst d", + "Ġsle e", + "Ġs lic", + "ĠStat istics", + "Ġcor ners", + "ĠB aker", + "Ġ: )", + "ment ation", + "ol ver", + "Ġlaugh ing", + "ĠT odd", + "ond e", + "ĠH ills", + "Ġn uts", + "ĠW oman", + "pl ane", + "Ġl iver", + "ĠIn side", + "S orry", + "Ġagre es", + "Ġfund ament", + "ĠF isher", + "Ġa uction", + "Ġthread s", + "gl as", + "ĠBas ic", + "ĠN at", + "Ġlack ing", + "Ġceleb ration", + "j u", + "Ġs illy", + "E uro", + "Ġt att", + "ight y", + "cont rolled", + "T est", + "ĠSing h", + "Ġr age", + "Ġrh yth", + "o ffic", + "ĠPh antom", + "Ġhead lines", + "Ġrespond ing", + "ĠMor ning", + "Ġvit amin", + "Ġboot s", + "ĠS ite", + "al in", + "p i", + "Ġvir al", + "ĠU C", + "D ER", + "ĠSe x", + "Ġst ocks", + "c urrent", + "Ġch urches", + "ĠR are", + "ĠMur phy", + "Ġden ial", + "ĠG aming", + "Ġtou g", + "Ġn ick", + "Ġm akers", + "ĠRon ald", + "Ġgener ous", + "ĠD oc", + "ĠMor ris", + "Ġtransform ed", + "ĠN ormal", + "Ġ10 4", + "ĠKick starter", + "ĠUp on", + "On line", + "ĠI RS", + "Ġw rap", + "Ġl oving", + "Ġarri ves", + "ĠD ue", + "Ġhe ter", + "ĠM ade", + "Ġrent al", + "Ġbelong s", + "Ġatt orneys", + "Ġcro ps", + "Ġmat ched", + "ul um", + "ol ine", + "10 9", + "Ġdis par", + "Ġbuy ers", + "ĠCam bridge", + "Ġeth ics", + "rou ps", + "Ġjust ified", + "Ġmarg inal", + "Ġrespect ed", + "win ning", + "Ġnodd ed", + "ĠSer ge", + "ĠForm er", + "C raft", + "######## ########", + "ĠWar ner", + "Ġd ash", + "et e", + "Ġent ert", + "ĠE scape", + "out heast", + "Ġkn ees", + "ĠB omb", + "Ġr ug", + "P ass", + "Ġatt itudes", + "go vernment", + "ĠPri or", + "Ġqual ities", + "Ġnot ification", + "ĠPh one", + "l ie", + "Ġanticip ated", + "ĠCom bat", + "ĠBar ry", + "Ġ198 2", + "Us ers", + "on er", + "Ġcomput ing", + "ĠConnect icut", + "Ġless er", + "Ġpe ers", + "ĠC u", + "Ġtechn ically", + "Ġsub mission", + "ĠUn iversal", + "Ġman ually", + "our ge", + "Ġrespond ents", + "ĠB TC", + "ĠH ost", + "Ġf are", + "ĠB ird", + "Ġrece ipt", + "al so", + "Ġj ack", + "Ġagric ulture", + "Ġsk ull", + "Ġ! 
=", + "Ġpass ive", + "ĠC I", + "Ġsoc ieties", + "Ġremind ed", + "Ġinter ference", + "B uy", + "Ġâ ľ", + "g on", + "Ġscrut iny", + "ĠW itch", + "Ġconduct ing", + "Ġ ãĥ", + "Ġexch anges", + "ĠMit chell", + "Ġinhab it", + "Ġtw ist", + "B D", + "Ġwhere ver", + "group on", + "Ġj okes", + "ĠBen jamin", + "ĠR andom", + "fr ame", + "ĠL ions", + "Ġhighlight ed", + "ĠArk ansas", + "E nt", + "Ġp ile", + "Ġpre lim", + "g s", + "mind ed", + "Ġfel ony", + "ĠG A", + "ĠL uck", + "Ġpract ically", + "ĠB os", + "Ġact ress", + "D am", + "ĠB ou", + "Ġvis a", + "Ġembed ded", + "Ġhy brid", + "Ġear liest", + "Ġsoon er", + "s ocial", + "ĠH A", + "Ġste ep", + "Ġdis advant", + "Ġexplo it", + "ĠE gg", + "ĠUlt ra", + "Ġnecess ity", + "L ocal", + "ie ge", + "Ġd ated", + "Ġmass es", + "Ġsubsc ription", + "pl ess", + "Ġan onym", + "Ġpresum ably", + "Bl ue", + "The ir", + "asket ball", + "ĠPhil ip", + "Ġcom ed", + "load ed", + "r ane", + "Ġref lection", + "Ch ina", + "Ġext ends", + "Ġform ing", + "Ġund ers", + "200 1", + "Ġgr at", + "Ġconcent rations", + "Ġins ulin", + "Ġsec ular", + "Ġwh ilst", + "Ġwin ners", + "Ad vertisements", + "Ġdeliber ately", + "ĠWork ing", + "Ġs ink", + "et ics", + "d ale", + "Ġmand ate", + "Ġg ram", + "Ġvac ation", + "Ġwarn ings", + "ri pp", + "ĠTH AT", + "Ġcomment ary", + "Ġint u", + "Ġa est", + "Ġreason ing", + "Ġbreak down", + "ĠZ ombie", + "Ġ-- >", + "ĠPolit ical", + "c ott", + "Ġthr ust", + "Ġtechn ological", + "Ġdec iding", + "Ġtraff icking", + "L ong", + "W elcome", + "pr ising", + "ĠCommun ications", + "Ġend ors", + "Ġsw ift", + "Ġmetab ol", + "co ins", + "res a", + "ĠHT TP", + "Ġen roll", + "ĠH appy", + "us r", + "int age", + "Ġ[ \"", + "u ably", + "ĠM aterial", + "Ġrepe al", + "Se pt", + "k h", + "ĠMod i", + "Ġunder neath", + "ĠI L", + "sh ore", + "Ġdiagn osed", + "ace utical", + "Ġsh ower", + "au x", + "ĠSw itch", + "ĠStre ngth", + "Ġj ihad", + "n ational", + "Ġtra uma", + "uss y", + "on i", + "Ġcons olid", + "Ġcal ories", + "ĠF lynn", + "ag ged", + "16 8", + "ĠP ink", + "Ġfulf ill", + "Ġch ains", + "Ġnot ably", + "ĠA V", + "L ife", + "ĠCh uck", + "m us", + "ĠUr ban", + "ĠH end", + "Ġdep osit", + "ĠS ad", + "Ġaff air", + "OR K", + "ie val", + "ĠF DA", + "Ġt rop", + "ĠOver all", + "Ġvirt ue", + "Ġsatisf action", + "au nd", + "Ġl un", + "ĠSw itzerland", + "ĠOper ation", + "pro cess", + "Ġsh ook", + "Ġcount ies", + "le ased", + "ĠCharl otte", + "1 12", + "Ġtrans cript", + "Ġre dd", + "p ush", + "ĠHe y", + "ĠAn alysis", + "[ \"", + "Ġaltern atives", + "ard less", + "Ġele ph", + "Ġpre jud", + "ĠLe af", + "H aving", + "ĠH ub", + "Ġexpress ions", + "ĠVol ume", + "Ġshock ing", + "ĠRed s", + "Ġread ily", + "Ġplan ets", + "ad ata", + "Ġcollaps ed", + "ĠMad rid", + "Ġir rit", + "i pper", + "ĠEn c", + "ĠW ire", + "Ġbu zz", + "ĠG P", + "ash a", + "Ġaccident ally", + "ur u", + "Ġfrust rated", + "ĠS A", + "Ġhung ry", + "ĠH uff", + "Ġlab els", + "ant o", + "ĠE P", + "Ġbar riers", + ") |", + "ĠBer keley", + "ĠJ ets", + "Ġp airs", + "ĠL an", + "J ames", + "ĠB ear", + "Ġhum or", + "ĠLiber ty", + "Ġmagn itude", + "Ġag ing", + "ĠM ason", + "Ġfriends hip", + "umb ling", + "Ġemer ge", + "Ġnewsp apers", + "Ġam bitious", + "ĠRich ards", + "atern al", + "Ġ198 1", + "Ġcook ies", + "Ġsc ulpt", + "Ġpur suit", + "L ocation", + "Ġscript s", + "p c", + "Ġarrang ements", + "Ġd iameter", + "Ġl oses", + "am ation", + "Ġl iqu", + "ĠJ ake", + "aret te", + "Ġunderstand s", + "ĠZ en", + "v m", + "Ġappro ve", + "Ġw ip", + "Ġult ra", + "Ġint end", + "ĠD I", + "asc ular", + "Ġst ays", + "ĠK or", + "ĠK l", + "Ġinvest ing", + 
"L a", + "Ġbelie ving", + "b ad", + "m outh", + "Ġtaxp ayer", + "ãĥ ĥ", + "ĠQue bec", + "Ġl ap", + "ĠSw iss", + "d rop", + "Ġdr ain", + "ir i", + "et c", + "ft en", + "ĠN ex", + "Ġst raw", + "Ġscream ing", + "Ġcount ed", + "Ġdam aging", + "Ġamb assador", + "cent ury", + "Ġpro x", + "Ġarrest s", + "u v", + "il ateral", + "ĠCh arg", + "Ġpresc ribed", + "Ġindepend ently", + "Ġf ierce", + "ĠB aby", + "Ġb rave", + "Ġsu its", + "= >", + "Ġbas eline", + "ĠR ate", + "Ġis lands", + "Ġ( (", + "g reen", + "ix els", + "Ġname ly", + "ĠVill age", + "th an", + "am y", + "V ersion", + "g mail", + "ential s", + "ĠS ud", + "ĠMel bourne", + "Ġarri ving", + "Ġquant um", + "e ff", + "rop olitan", + "T ri", + "Ġfun eral", + "ĠI R", + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ", + "ĠC ob", + "it ably", + "Ġt urb", + "Ġcomb o", + "Re view", + "Ġdeploy ment", + "u ity", + "ĠB ott", + "Ġinv isible", + "Ġrender ing", + "Ġunl ocked", + "Ġa qu", + "ĠVlad imir", + "Ġp ad", + "ĠBr ain", + "ĠLeg acy", + "dr agon", + "ĠKurd ish", + "Ġsound ed", + "Ġdet ained", + "ĠD M", + "g ary", + "Ġd aughters", + "Ġdistur bing", + "uk a", + "ĠPar ad", + "Ġt ast", + "Ġunf ortunate", + "Ġu l", + "em in", + "Ġattend ance", + "tr l", + "Ġpar ks", + "ĠMem orial", + "ĠAl ice", + "oth y", + "gu ard", + "ĠD ise", + "ĠSh an", + "ĠFor um", + "R ich", + "Ġshif ted", + "ue z", + "Ġl ighter", + "ĠMag n", + "Ġc od", + "S ch", + "ham mad", + "P ub", + "3 50", + "ĠP okemon", + "Ġprot otype", + "Ġun re", + "B ase", + "ĠStud ents", + "ĠRep ly", + "ĠCommun ist", + "Ġg au", + "ĠTy ler", + "I Z", + "Ġparticip ated", + "Ġsup rem", + "ĠDet ails", + "Ġvessel s", + "ro d", + "Ġt ribe", + "ke ep", + "Ġassum ptions", + "Ġp ound", + "Ġcr ude", + "ĠAv ailable", + "Ġswim ming", + "Ġin clusion", + "Ġadv ances", + "c ulation", + "Ġconserv ation", + "Ġover d", + "ĠBuff alo", + "Art icle", + "ed ge", + "Ġaw a", + "ĠMad ison", + "Ġsid ew", + "Ġcat ast", + "ĠK rist", + "uc le", + "ĠHigh way", + "ĠTer ror", + "Ġactiv ation", + "Ġuncons cious", + "ĠSat an", + "ĠSus an", + "ill ery", + "Ġarr anged", + "i op", + "Ġrum ors", + "ur ring", + "th ink", + "ĠKe ith", + "ĠK ind", + "Ġavoid ing", + "by n", + "n ut", + "ĠSpe aker", + "r us", + "n ames", + "Ġgu ilt", + "ĠOlymp ics", + "Ġsa il", + "ĠM es", + "lev ant", + "ĠColumb us", + "a ft", + "C ity", + "S outh", + "ĠHar vey", + "ĠP un", + "S everal", + "Ġment ally", + "Ġimp ress", + "m ount", + "ĠUb untu", + "âĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶ", + "ĠSuper man", + "ĠMP s", + "Ġintent ions", + "ĠR acing", + "Ġlike lihood", + "Ġ2 40", + "T otal", + "Ġto ys", + "ĠW atson", + "Ġur ge", + "L ear", + "ĠP aper", + "Ġoccur ring", + "ĠB eng", + "ĠC ert", + "Ġst ones", + "T im", + "ĠTw in", + "z b", + "ĠD ynam", + "Ġpolit ician", + "k ens", + "ĠEnter prise", + "UT ERS", + "Ġab ol", + "Ġref resh", + "Ġarbit rary", + "pe ction", + "Ġtrou bles", + "Ġ} );", + "t v", + "Ġpil ots", + "Ġdist ribute", + "Ġaud it", + "Ġp ause", + "orig inal", + "Ġr ivals", + " £", + "F ig", + "T L", + "ab il", + "ry ing", + "L in", + "ion ed", + "l on", + "Ġf ancy", + "Ġcr ashed", + "Ġt ract", + "Ġshe d", + "Ġcons ume", + "B ased", + "down load", + "in it", + "Ġvolt age", + "Int rodu", + "Ġcondem ned", + "ĠFin ance", + "res pect", + "Ġex cluded", + "Ġestablish ing", + "her ic", + "Ġher itage", + "Ġspect acular", + "Ġun st", + "ĠSnow den", + "ĠL ane", + "S an", + "Ġprotect ions", + "st ruction", + "inc inn", + "Ġmac ro", + "C ustom", + "ios ity", + "Ġes p", + "Ġfunction ing", + "Ġm ush", + "Ġp uzzle", + "Ġeth ical", + "M al", + "Ġgo verning", + "ĠF erguson", + "Ġrest ored", + "Ġst 
ressed", + "ĠCoun ter", + "ĠK as", + "cl ip", + "AN S", + "Ġse iz", + "U K", + "by ss", + "old own", + "ap i", + "Ġperman ently", + "oun ters", + "W est", + "Th rough", + "L ight", + "at oes", + "Ġne at", + "Ġc ord", + "ure r", + "Ġsevere ly", + "ĠA ven", + "Ġinter rog", + "Ġtri ple", + "G iven", + "N umber", + "Ġar ise", + "Ġs her", + "pl ant", + "Ġfl ower", + "ĠC ou", + "Ġat e", + "Ġnew er", + "b ul", + "Ġmean while", + "ĠL air", + "Ġadjust ment", + "ĠCop yright", + "Ġd ivers", + "i ological", + "Ġgam ers", + "o at", + "Ġhistor ically", + "Ġanal og", + "Ġlong time", + "Ġpres cription", + "ĠM ist", + "ĠHy per", + "ĠM aine", + "ĠDe ity", + "Ġmulti pl", + "ĠRe incarn", + "ĠH yd", + "ĠP ic", + "S il", + "r ants", + "ĠC ris", + ". ;", + "( {", + "epend ence", + "Ġrec y", + "ate ur", + "Ġqu ad", + "Ġgl ob", + "Ġcon ced", + "te am", + "Ġcapital ist", + "ĠL ot", + "Ġroy al", + "ĠCy ber", + "Ġblack s", + "met ic", + "ri v", + "ĠD anny", + "Ġsp o", + "ĠR O", + "Ġanim ated", + "rypt ed", + "ĠDep uty", + "Ġrend ered", + "F E", + "Ġstre ak", + "Ġcloud s", + "ĠDou g", + "~~~~ ~~~~", + "Ġdisc our", + "ĠVe h", + "Ġpsych ology", + "ĠJ ourney", + "Ġcry stal", + "ĠFro st", + "Ġsuspic ion", + "Ġrel ate", + "or us", + "ĠC rypt", + "ĠN VIDIA", + "com ed", + "ut ing", + "incinn ati", + "Ġvulner ability", + "ost ic", + "Ġisol ation", + "Ġcool ing", + "ĠCoal ition", + "Ġ1 19", + "F our", + "ĠDe al", + "Ġâ ī", + "se mble", + "ram ent", + "ĠBar celona", + "Ġ10 2", + "Ġcoc aine", + "ocaly pse", + "F eb", + "ogen ic", + "Ġmut ation", + "Ġcrypt oc", + "ĠK el", + "ĠG it", + "a is", + "Ġs isters", + "AN K", + "Ġactiv ate", + "T er", + "Ġd read", + "yl on", + "Ġprop ri", + "A ust", + "ĠDef ault", + "Ġout door", + "Ġshe er", + "ce ive", + "Ġg ently", + "Ð ¾", + "Pro gram", + "Ġâ ĨĴ", + "Ġve gan", + "ĠCr us", + "Ġrespons ibilities", + "ĠH R", + "OL D", + "Ġprev ents", + "Ġst iff", + "ĠW ere", + "Ġathlet ic", + "ĠSc ore", + "Ġ) :", + "Ġcolumn s", + "ĠL oc", + "av ailable", + "ĠF ram", + "ĠS essions", + "Ġcompan ion", + "Ġpack s", + "14 0", + "ĠKn ights", + "Ġf art", + "Ġstream s", + "Ġsh ore", + "Ġapp eals", + "ĠPer formance", + "h aul", + "ĠSt ra", + "ĠN ag", + "10 3", + "ĠTrans portation", + "B B", + "E v", + "z an", + "P ublic", + "Ġtw in", + "uls ion", + "M ult", + "Ġelect ro", + "Ġstat ue", + "ation ally", + "ĠN ort", + "Ġins pection", + "/ *", + "ig ue", + "Ġcomp assion", + "ĠT ales", + "ĠSte in", + "ĠSc reen", + "ĠB ug", + "ĠL ion", + "g irl", + "Ġwithdraw al", + "Ġobject ives", + "Ġblood y", + "Ġprelim inary", + "Ġj acket", + "Ġdim ensions", + "ĠC ool", + "ĠOcc up", + "Ġw reck", + "Ġdoub led", + "ank ing", + "Ġ19 75", + "Ġglass es", + "ĠW ang", + "pro v", + "P ath", + "connect ed", + "ĠMult i", + "ĠNor way", + "agon ist", + "Ġfe ared", + "Ġtouch ing", + "Ġarg uably", + "¯¯¯¯ ¯¯¯¯", + "ĠNC AA", + "che m", + "Ġsp at", + "ĠW WE", + "ĠC el", + "ig ger", + "Ġattack er", + "ĠJo in", + "ob ject", + "ett a", + "Ġelim inated", + "d et", + "Ġdest ruct", + "ĠLuc as", + "ct uary", + "18 0", + "ĠBr ady", + "ĠBl ues", + "B ay", + "au kee", + "Ġtim eline", + "Ġdeleg ates", + "w ritten", + "uff icient", + "Ġsh apes", + "Cop yright", + "ou ble", + "serv ice", + "Ġp ione", + "Ġcolleg es", + "Ġrow s", + "Ġsp ite", + "Ġassess ed", + "3 60", + "Ġle ase", + "Ġconfident ial", + "ck er", + "ĠMan ning", + "ĠV oice", + "Ġse aled", + "Ġcalcul ate", + "N O", + "ĠAss istant", + "Ġteen ager", + "ul ent", + "ather ine", + "Ġm ock", + "Ġd iamond", + "Ġf est", + "Ġsw itched", + "Ġres ume", + "ĠPu erto", + "Ġl anes", + "ir ation", + "ĠSimilar ly", 
+ "Ġro d", + "ĠS el", + "ĠPal ace", + "ĠLim ited", + "e ous", + "Ġvar iant", + "Ġw ard", + "Ġ) )", + "Sh ow", + "OO K", + "A lex", + "ĠN ep", + "br is", + "ĠWik ipedia", + "Ġexcept ional", + "Ġman ages", + "ĠD raw", + "Ag ain", + "Ġco pper", + "ut t", + "Ġex ports", + "Ġport folio", + "Ġelev ated", + "R ated", + "ĠOther wise", + "ĠT act", + "ĠShe l", + "ĠT X", + "\" âĢĶ", + "Ġres ur", + "ĠW a", + "ven ant", + "Ġmon etary", + "pe ople", + "E mail", + "Ġfif ty", + "ĠS weet", + "ĠMalays ia", + "Ġconf using", + "ĠR io", + "ud a", + "uten ant", + "\" );", + "Ġpra ised", + "Ġvol umes", + "t urn", + "Ġm ature", + "Ġnon profit", + "Ġpassion ate", + "ĠPriv ate", + "Ġ10 3", + "Ġdesc end", + "ç ¥ŀ", + "uff y", + "head ed", + "Whe ther", + "ri en", + "ze ch", + "be it", + "Ġch rom", + "ĠMc M", + "Ġd ancing", + "Ġe leg", + "ĠNot iced", + "11 5", + "Ġadvoc acy", + "ENT S", + "amb ling", + "ĠMin or", + "ĠF inn", + "Ġprior ities", + "Ġthere of", + "ĠSt age", + "ĠRog ers", + "Ġsubst itute", + "ĠJ ar", + "ĠJeff erson", + "Ġlight ly", + "10 2", + "ĠL isa", + "u its", + "ys ical", + "Ġshif ts", + "Ġd rones", + "Ġwork place", + "Ġres id", + "ens ed", + "ah n", + "Ġpref erences", + "ser ver", + "Ġdeb ates", + "d oc", + "ĠGod s", + "Ġhelicop ter", + "Ġhon our", + "Ġconsider ably", + "ed ed", + "ĠF emale", + "ĠAn ne", + "Ġre un", + "ĠF ace", + "ĠHall ow", + "ĠBud get", + "Ġcondem n", + "Ġt ender", + "Pro f", + "ocr atic", + "ĠTurn er", + "ĠAg ric", + "Ġ19 76", + "Ġa pt", + "d isc", + "ĠF ighter", + "ĠA ur", + "Ġgar bage", + "in put", + "ĠK arl", + "ĠOl iver", + "ĠL anguage", + "k n", + "N on", + "ĠCl ar", + "Ġtrad itions", + "Ġad vertisement", + "ĠS or", + "Ġarch ive", + "Ġvill ages", + "7 50", + "Ġimplement ing", + "w aukee", + "Ġdiet ary", + "Ġswitch ing", + "Rep ublic", + "Ġvel ocity", + "Ġc it", + "ĠA wards", + "Ġfin ancing", + "Ġlast ed", + ") ]", + "Ġrem inder", + "P erson", + "Ġprec ision", + "Ġdesign ers", + "ĠF ried", + "ĠB order", + "Ġtr agic", + "Ġw ield", + "Ġiniti atives", + "ĠT ank", + "w er", + "Ġjo ins", + "R o", + "in ery", + "Ġar row", + "Ġgener ating", + "found er", + "Ġsear ches", + "Ġrandom ly", + "A ccess", + "Ġb atch", + "Ġp osed", + "l at", + "Ġpursu ing", + "as a", + "Ġtest ified", + "form ing", + "ĠSh ar", + "w iki", + "ĠE ither", + "S ometimes", + "Ġsen ators", + "ĠJohn ny", + "ĠTal iban", + "ĠG PS", + "\":\" /", + "ãģ® å", + "Ġanaly zed", + "ĠRub io", + "ĠMove ment", + "op ard", + "ii i", + "St and", + "f ight", + "Ġign oring", + "i ang", + "ĠG N", + "so ever", + "ĠST AT", + "Ġref using", + "Ġswe at", + "Ġb ay", + "P ORT", + "ir med", + "ak y", + "Ġdis pro", + "Ġlabel ed", + "Ġ10 8", + "H ello", + "Ġple asant", + "ab a", + "Ġtri umph", + "Ġab oard", + "Ġinc om", + "ĠC row", + "le tt", + "Ġfol k", + "Ġch ase", + "` `", + "ĠBr us", + "Ġte ens", + "c ue", + "Ġter rain", + "h yd", + "il ight", + "OR Y", + "Su pport", + "ew s", + "ll i", + "rain ts", + "ĠC and", + "Ġab used", + "ach ment", + "l arg", + "B as", + "ĠC ancer", + "Ġ19 78", + "Ġsupp orter", + "ac cess", + "ĠTer min", + "ĠT ampa", + "ĠAN Y", + "Ġnew est", + "ĠCrim inal", + "ed u", + "Ġ19 30", + "Ġadm its", + "Ġend e", + "Ġfail ures", + "ur ate", + "ful ness", + "cy cl", + "ĠSub ject", + "Ġinf inite", + "th ree", + "W A", + "p it", + "ĠInst all", + "R ad", + "ili ation", + "G M", + "Ġcontin ent", + "Ġaccommod ate", + "ĠCl ay", + "Ġp up", + "ĠF unction", + "Ġham mer", + "ĠAlbert a", + "Ġrev ised", + "Ġminor ities", + "Ġmeasure ment", + "Con nell", + "Ġdis able", + "ĠM ix", + "In cre", + "Ġfor k", + "ĠR osen", + "Ġimpl ies", + "umb 
lr", + "AN G", + "Ġprote ins", + "Ġagg ression", + "Ġfacilit ate", + "S N", + "Ġilleg ally", + "u er", + "Ġacad em", + "Ġp uzz", + "ĠSh ift", + "p ay", + "oll o", + "Ġaud iences", + "B uild", + "Ġno ble", + "Ġsynt ax", + "â ĺħ", + "Ġbe am", + "ĠB ed", + "ĠA ld", + "Ġorig ins", + "v ideo", + "Ġ19 77", + "ĠAss ault", + "Ġgar age", + "Te am", + "Ġver dict", + "Ġd war", + "ĠVirt ual", + "e vent", + "Ke ep", + "Ġsent iment", + "Ġwild life", + "sh irt", + "Ġb urg", + "Ġrecommend ation", + "rep resent", + "Ġgall ery", + "own ers", + "Ġsch olar", + "Ġconven ience", + "ĠSw ift", + "Ġconv inc", + "C ap", + "Ġwar fare", + "ĠVis ual", + "Ġconst itute", + "Ġab ort", + "ĠWe ather", + "ĠLook ing", + "ĠH em", + "Ġmart ial", + "Ġinc oming", + "et ition", + "Ġtoler ance", + "ĠCre ated", + "Ġfl ows", + "ĠE lder", + "Ġsoul s", + "Ġf oul", + "ĠP ain", + "ĠC AN", + "Ġ2 20", + "b c", + "he nd", + "Ġgen ius", + "R eal", + "ĠW r", + "omet er", + "p ad", + "Ġlim iting", + "ĠS i", + "ĠL ore", + "ĠAd ventures", + "Ġvar ied", + "D isc", + "f in", + "ĠPerson al", + "Ch ris", + "Ġinv ented", + "Ġd ive", + "ĠR ise", + "Ġo z", + "ĠCom ics", + "Ġexp ose", + "ĠRe b", + "let ters", + "s ite", + "im ated", + "Ġh acking", + "Ġeduc ated", + "ĠNob ody", + "Ġdep ri", + "Ġincent ive", + "ãĤ ·", + "Ġovers ight", + "Ġtrib es", + "ĠBelg ium", + "Ġlicens ing", + "our t", + "Produ ct", + "ah l", + "ĠG em", + "Ġspecial ist", + "Ġc ra", + "ann ers", + "ĠCor byn", + "Ġ19 73", + "RE AD", + "Ġsum mar", + "Ġover look", + "ĠApp lication", + "Ġin appropriate", + "Ġdownload ed", + "Q ue", + "ĠB ears", + "Ġth umb", + "ĠChar acter", + "ĠReincarn ated", + "ĠS id", + "Ġdemonstr ates", + "s ky", + "ĠBloom berg", + "ĠAr ray", + "ĠRes ults", + "ĠFour th", + "ĠED T", + "ĠO scar", + "c end", + "Ġ10 6", + "ĠN ULL", + "ĠH ERE", + "m atch", + "ĠBr un", + "Ġgluc ose", + "ie g", + "eg u", + "Ġcert ified", + "Ġrel ie", + "Ġhuman itarian", + "Ġpr ayers", + "K ing", + "Ġn an", + "h ou", + "10 8", + "ul u", + "Ġrenew able", + "Ġdistingu ish", + "Ġd ense", + "ĠV ent", + "ĠPack age", + "ĠB oss", + "Ġedit ors", + "Ġm igr", + "T ra", + "ĠPet ers", + "ĠAr ctic", + "200 4", + "ĠC ape", + "Ġloc ally", + "Ġlast ing", + "Ġhand y", + ". 
).", + "P an", + "ĠR ES", + "Ind ex", + "Ġt ensions", + "Ġformer ly", + "Ġide ological", + "Ġsens ors", + "Ġdeal ers", + "Ġdef ines", + "S k", + "Ġproceed s", + "Ġpro xy", + "az ines", + "ĠB ash", + "ĠP ad", + "ĠC raft", + "eal ous", + "Ġshe ets", + "omet ry", + "J une", + "cl ock", + "T T", + "ĠThe atre", + "ĠB uzz", + "Ġch apters", + "Ġmill enn", + "Ġd ough", + "ĠCongress ional", + "Ġimag ined", + "av ior", + "Ġclin ic", + "Ġ19 45", + "Ġhold er", + "ro ot", + "oles ter", + "Ġrest art", + "B N", + "ĠHam as", + "ĠJ ob", + "Ġor b", + "Ġr am", + "Ġdiscl ose", + "Ġtransl ate", + "Ġimm igrant", + "Ġannoy ing", + "Ġtreat y", + "an ium", + "ĠTe a", + "ĠLeg ion", + "Ġcrowd s", + "ĠB ec", + "ĠA er", + "oh yd", + "B ro", + "Look ing", + "Ġl bs", + "Ġagg ress", + "Ġse am", + "Ġinter cept", + "ĠM I", + "mer cial", + "act iv", + "ĠC it", + "Ġdim ension", + "Ġconsist ency", + "Ġr ushing", + "ĠDou glas", + "Ġtr im", + "Inst all", + "ick er", + "Ġsh y", + "10 6", + "Ġment ions", + "pe lled", + "ĠT ak", + "c ost", + "Ġclass room", + "Ġfort une", + "dri ven", + "Ġun le", + "ĠWhe el", + "Ġinvest or", + "ĠM asters", + "k it", + "Ġassoci ations", + "ĠEv olution", + "op ing", + "us cript", + "Ġprov incial", + "ĠWal ter", + "av i", + "S O", + "Ġun limited", + "Eng lish", + "ĠC ards", + "ĠEb ola", + "ne red", + "Ġreven ge", + "Ġout right", + "um per", + "Ġf itting", + "ĠSol id", + "Ġform ally", + "Ġproblem atic", + "Ġhaz ard", + "Ġenc ryption", + "Ġstraight forward", + "ĠA K", + "Ġp se", + "ĠOr b", + "ĠCh amber", + "ĠM ak", + "Cont ents", + "Ġloyal ty", + "Ġl yrics", + "ĠSy m", + "Ġwel comed", + "Ġcook ed", + "Ġmon op", + "Ġn urse", + "Ġmis leading", + "Ġe ternal", + "Ġshif ting", + "Ġ+ =", + "V is", + "Ġinst itutional", + "ill ary", + "Ġp ant", + "VER T", + "ĠA CC", + "ĠEn h", + "Ġinc on", + "ĠRE UTERS", + "Ġdon ated", + "âĢ¦âĢ¦ âĢ¦âĢ¦", + "In tern", + "Ġexhib it", + "Ġt ire", + "ĠR ic", + "ĠCh ampion", + "ĠMu hammad", + "N ING", + "ĠSoc cer", + "Ġmob ility", + "Ġvary ing", + "ĠM ovie", + "Ġl ord", + "o ak", + "F ield", + "Ġve ctor", + "us ions", + "Ġsc rap", + "Ġen abling", + "m ake", + "T or", + ". 
*", + "| |", + "ĠWe bsite", + "ĠN PC", + "Ġsocial ist", + "ĠBill y", + "ĠAdd itional", + "Ġc argo", + "Ġfar ms", + "ĠSo on", + "ĠPri ze", + "Ġmid night", + "Ġ9 00", + "se en", + "ĠSp ot", + "Ġshe ep", + "Ġspons ored", + "ĠH i", + "ĠJ ump", + "Ġ19 67", + "Micro soft", + "ĠAg ent", + "Ġch arts", + "d ir", + "Ġadj acent", + "Ġtr icks", + "Ġman ga", + "Ġex agger", + "/ >", + "foot ball", + "ĠF CC", + "G C", + "ĠT ier", + "and ra", + "OU ND", + "% ),", + "Ġfru its", + "V C", + "ĠA A", + "R ober", + "Ġmid st", + "â Ĺ", + "ank a", + "Ġlegisl ature", + "ĠNe il", + "Ġtour ists", + "\" \"", + "ĠWar ning", + "ĠNever theless", + "ĠOffic ial", + "ĠWh atever", + "Ġm old", + "Ġdraft ed", + "Ġsubst ances", + "Ġbre ed", + "Ġt ags", + "ĠT ask", + "Ġver b", + "Ġmanufact ured", + "com ments", + "ĠPol ish", + "Pro v", + "Ġdetermin es", + "Ob ama", + "k ers", + "Ġutter ly", + "Ġse ct", + "sc he", + "ĠG ates", + "ĠCh ap", + "Ġal uminum", + "Ġz ombie", + "ĠT ouch", + "ĠU P", + "Ġsatisf y", + "Ġpred omin", + "asc ript", + "Ġelabor ate", + "Ġ19 68", + "Ġmeas uring", + "ĠV ari", + "any ahu", + "Ġs ir", + "ul ates", + "id ges", + "ick ets", + "ĠSp encer", + "T M", + "oub ted", + "Ġpre y", + "Ġinstall ing", + "ĠC ab", + "re ed", + "re ated", + "Su pp", + "Ġwr ist", + "ĠK erry", + "10 7", + "ĠK le", + "ĠR achel", + "Ġc otton", + "ĠA RE", + "ĠE le", + "Cont rol", + "Ġload s", + "ĠD od", + "an as", + "b one", + "Ġclass ical", + "ĠReg ional", + "ĠInt eg", + "V M", + "Ġdes ires", + "Ġaut ism", + "support ed", + "ĠM essage", + "Ġcomp act", + "writ er", + "Ġ10 9", + "ĠHur ricane", + "c ision", + "Ġcy cles", + "Ġdr ill", + "Ġcolle ague", + "Ġm aker", + "G erman", + "Ġmist aken", + "S un", + "ĠG ay", + "Ġwhat soever", + "Ġsell s", + "ĠA irl", + "l iv", + "ĠO ption", + "Ġsol ved", + "Ġse ctors", + "Ġhorizont al", + "Ġequ ation", + "ĠSk ill", + "ĠB io", + "g ement", + "ĠSn ap", + "ĠLeg al", + "Ġtradem ark", + "Ġmake up", + "Ġassemb led", + "Ġsa ves", + "ĠHallow een", + "ĠVer mont", + "ĠFR OM", + "Ġfar ming", + "ĠP odcast", + "accept able", + "ĠHig her", + "Ġas leep", + "ull ivan", + "Ġrefere n", + "ĠLe v", + "Ġbul lets", + "ok o", + "H C", + "Ġst airs", + "Ġmain tains", + "ĠL ower", + "ĠV i", + "Ġmar ine", + "Ġac res", + "Ġcoordin ator", + "ĠJ oh", + "Ġcounterpart s", + "ĠBrother s", + "Ġind ict", + "b ra", + "Ġch unk", + "Ġc ents", + "H ome", + "ĠMon th", + "Ġaccording ly", + "if les", + "ĠGerm ans", + "ĠSy n", + "H ub", + "Ġey eb", + "âĶĢâĶĢ âĶĢâĶĢ", + "Ġr anges", + "ĠHoll and", + "ĠRob ot", + "f c", + "M ike", + "Ġpl asma", + "Ġsw ap", + "Ġath lete", + "ĠR ams", + ",' \"", + "Ġinfect ions", + "Ġcor rid", + "Ġv ib", + "Ġpat ches", + "Ġtradition ally", + "Ġrevel ation", + "Ġswe ep", + "Ġgl ance", + "Ġin ex", + "200 3", + "ĠR aw", + "work ing", + "os ures", + "ĠD at", + "ĠLyn ch", + "Ġle verage", + "ĠRe id", + "Ġcorrel ation", + "ian ces", + "av ascript", + "Ġrep ository", + "ret ty", + "Ġ19 72", + "24 0", + "Ġo un", + "p ol", + "ĠRe ed", + "Ġtact ical", + "is ite", + "App le", + "ĠQu inn", + "Ġrap ed", + "ill o", + "Euro pe", + "Ġalgorith ms", + "ĠRod rig", + "i u", + "Ġill um", + "Ġf ame", + "Ġintrodu cing", + "Ġdel ays", + "ĠRaid ers", + "Ġwh istle", + "Ġnovel s", + "ĠRe ally", + "Ġder iv", + "Ġpublic ations", + "ĠNe ither", + "ĠCom merce", + "Ġa ston", + "l anguage", + "Not es", + "ĠR oth", + "ĠF ear", + "Ġm ate", + "Ġpar ade", + "ĠQ B", + "Ġman eu", + "ĠC incinnati", + "m itting", + "Ġwa ist", + "ĠR ew", + "Ġdisc ont", + "Ð °", + "Ġst aring", + "Ġal ias", + "Ġsec urities", + "Ġtoile t", + "ĠJ edi", + "Ġun law", + "v ised", 
+ "//// ////", + "] (", + "ĠWe iss", + "Ġpre st", + "ĠComp an", + "Ġmem o", + "ĠGr ace", + "J uly", + "ĠEl ite", + "cent er", + "ĠSt ay", + "Ġgal axy", + "Ġto oth", + "ĠS ettings", + "Ġsubject ed", + "ãĤ ¦", + "Ġline back", + "Ġretail ers", + "ĠW ant", + "Ġd angers", + "A ir", + "Ġvolunt ary", + "ew ay", + "Ġinterpret ed", + "ot ine", + "à §", + "Ġp el", + "Serv ice", + "ĠEvent ually", + "Ġcare ers", + "Ġthreat en", + "Ġmem or", + "ĠBrad ley", + "anc ies", + "s n", + "ĠUn known", + "N ational", + "Ġsh adows", + "ail and", + "ĠD ash", + "Every one", + "izz ard", + "M arch", + "= (", + "Ġpull s", + "Ġstr anger", + "Ġback wards", + "ĠBern ard", + "imens ional", + "Ġch ron", + "Ġtheoret ical", + "k top", + "Ġw are", + "ĠInvest ig", + "ĠIn iti", + "ĠOper ations", + "o ven", + "oc ide", + "* /", + "Ġfl ames", + "ĠC ash", + "sh it", + "Ġc ab", + "ĠAn aly", + "ĠSe ah", + "Ġdefin ing", + "Ġorder ing", + "Ġimm un", + "Ġpers istent", + "AC H", + "Russ ian", + "m ans", + "Ġh ind", + "Ġphot ography", + " ©", + "Ġh ug", + "Ġ10 7", + "ĠH ence", + "i ots", + "ude au", + "Ġsubsid ies", + "Ġroutine ly", + "ĠDev ice", + "it ic", + "Ġdisg ust", + "land er", + "Ġ19 40", + "Ġassign ment", + "ĠB esides", + "w ick", + "ĠD ust", + "us c", + "struct ed", + "11 1", + "de velop", + "Ġf ond", + "Ġinter section", + "Ġdign ity", + "Ġcommission er", + "With out", + "re ach", + "Ġcart oon", + "Ġsc ales", + "ãĥ Ń", + "F IG", + "Ġsurve ys", + "ĠIndones ia", + "Ġart work", + "Ġun ch", + "Ġcy cling", + "un ct", + "au er", + "or ate", + "ĠOb viously", + "Ġcharacter ized", + "fe ld", + "Ġaff irm", + "Ġinn ings", + "Ġ é", + "Ġal iens", + "Ġcl oth", + "et ooth", + "ĠC ertain", + " §", + "Ġdig est", + "k now", + "ĠX L", + "Ġpredict ions", + "Ġd in", + "W AR", + "Ġafter math", + "Ex ample", + "ĠSu ccess", + "ĠTh r", + "IG N", + "Ġmin er", + "B us", + "Ġcl arity", + "heim er", + "ĠO UT", + "ĠS end", + "ĠCirc le", + "ĠD iet", + "Ġpron ounced", + "Ġcreat ors", + "Ġearthqu ake", + "atter y", + "ge ons", + "Ġo d", + "Ġlay ing", + "or p", + "U lt", + "pro ject", + "Ġunder min", + "Ġsequ el", + "S am", + "ĠDark ness", + "Ġre ception", + "b ull", + "Y S", + "ĠV ir", + "Ġsequ ences", + "ĠCo in", + "Ġout fit", + "ĠW ait", + "1 19", + "Ġdel ivers", + ".... 
..", + "Ġbl own", + "ĠE sc", + "ĠM ath", + "per m", + "ĠU l", + "Ġgl im", + "Ġfac ial", + "Ġgreen house", + "Ġto kens", + "/ -", + "ĠAnn ual", + "ĠON E", + "Ġteen age", + "ĠPhys ical", + "ĠL ang", + "ĠC elt", + "Ġsu ed", + "ivid ually", + "Ġpat ience", + "ch air", + "reg ular", + "Ġa ug", + "in v", + "ex cept", + "ĠL il", + "Ġn est", + "f d", + "s um", + "ĠCh ase", + "Russ ia", + "ĠJenn ifer", + "Ġoff season", + "Over all", + "F ore", + "Ġr iot", + "A ud", + "form er", + "Ġdefend ers", + "ĠC T", + "iot ic", + "rib ly", + "Ġautom ated", + "Ġpen is", + "Ġins ist", + "Ġdi agram", + "ĠS QL", + "ĠG arc", + "Ġw itch", + "cl ient", + "ier ra", + "am bers", + "Ġrec ount", + "f ar", + "V ery", + "oster one", + "Ġappreci ated", + "ĠPer fect", + "S ection", + "Ġd oses", + "oca ust", + "Ġcost ly", + "Ġg rams", + "ĠSh i", + "Ġwrest ling", + "Ġ19 71", + "Ġtro phy", + "Ġn erve", + "ĠK az", + "ĠExper ience", + "Ġpled ged", + "Ġplay back", + "Ġcreat ivity", + "by e", + "Ġattack ers", + "Ġhold ers", + "ĠCo ach", + "ĠPh D", + "Ġtransf ers", + "Ġcol ored", + "ĠH indu", + "Ġd rown", + "Ġlist ened", + "ĠW A", + "ias m", + "P O", + "Ġappeal ing", + "Ġdiscl osed", + "ĠCh icken", + "ag ging", + "Ġple aded", + "Ġnav igation", + "ĠReturn s", + "Ġ[ [", + "R OR", + "E A", + "Ġphotograp her", + "ĠR ider", + "ipp ers", + "Ġsl ice", + "Ġe rect", + "Ġhe d", + "iss ance", + "ĠVik ings", + "ur ious", + "Ġapp et", + "oubted ly", + "Ch ild", + "Ġauthent ic", + "o os", + "ĠM aking", + "Ġannoun cing", + "Ġb od", + "Ġmet er", + "ĠN ine", + "ĠR ogue", + "Ġwork force", + "Ġrenew ed", + "Ġorganis ations", + "ac s", + "P LE", + "Sh ort", + "Ġcomp ounds", + "ĠVis it", + "Ġen velop", + "ear th", + "Ġsupport ive", + "gg le", + "ĠBrus sels", + "ĠGu ild", + "Cre ate", + "RE L", + "Ġaver aged", + "Ġ19 69", + "ri ages", + "Ġlength y", + "Ġforg ot", + "O kay", + "ĠE rd", + "Ġdeal er", + "Ġrec ession", + "D D", + "Ġdesper ately", + "Ġhun ger", + "Ġst icks", + "Ġm ph", + "ĠF aith", + "Ġintention ally", + "Ġdem ol", + "ue ller", + "ĠS ale", + "Ġde bris", + "s pring", + "Ġle ap", + ">> >>", + "Ġcontain ers", + "se lling", + "rane an", + "atter ing", + "Ġcomment ed", + "ĠC M", + "on ut", + "Ġwood s", + "es pecially", + "Ġorgan ize", + "iv ic", + "ĠWood s", + "ang a", + "s qu", + "Ġm aj", + "am on", + "Ġax is", + "Ġ19 74", + "ĠDen mark", + "Ġwar rior", + "ĠP and", + "Ġout lined", + "ĠB O", + "ins ula", + "z illa", + "eb ook", + "Ġd are", + "Ġsear ched", + "Ġnav igate", + "S n", + "writ ing", + "Ġun ited", + "J apan", + "ĠHe brew", + "Ġfl ame", + "Ġrel ies", + "Ġcatch ing", + "ĠSh o", + "Ġimprison ment", + "Ġp ockets", + "Ġclos ure", + "ĠF am", + "t im", + "ade qu", + "Act ivity", + "Ġrecru iting", + "ĠW ATCH", + "ĠArgent ina", + "d est", + "Ġapolog ize", + "or o", + "Ġlack s", + "Ġtun ed", + "ĠGriff in", + "Ġinf amous", + "Ġcelebr ity", + "ss on", + "Ġ ----------------------------------------------------------------", + "ĠIs is", + "ĠDis play", + "Ġcred ibility", + "Ġeconom ies", + "Ġhead line", + "ĠCow boys", + "Ġind ef", + "Ġl ately", + "Ġincent ives", + "but ton", + "ĠM ob", + "A ut", + "Ġres igned", + "ĠO m", + "c amp", + "Ġprof iles", + "Ġsche mes", + "olph ins", + "ay ed", + "Cl inton", + "en h", + "ĠY ahoo", + "Ġab st", + "Ġan k", + "su its", + "Ġw ished", + "ĠMar co", + "udd en", + "Ġsp here", + "ĠB ishop", + "Ġincorpor ated", + "ĠPl ant", + "11 4", + "Ġh ated", + "p ic", + "Ġdon ate", + "Ġl ined", + "Ġbe ans", + "Ġsteal ing", + "Ġcost ume", + "Ġsher iff", + "Ġfor ty", + "Ġint act", + "Ġadapt ed", + "Ġtrave lling", + "b art", + "Ġnice 
ly", + "Ġdri ed", + "Ġsc al", + "os ity", + "NOT E", + "ĠB h", + "ĠBron cos", + "ĠI gn", + "Ġint imate", + "Ġchem istry", + "Ġopt imal", + "D eb", + "ĠGener ation", + "Ġ] ,", + "ich i", + "ĠW ii", + "ĠYOU R", + "vent ions", + "W rite", + "Ġpop ul", + "un ning", + "ĠW or", + "V ol", + "Ġqu een", + "head s", + "K K", + "Ġanaly ze", + "op ic", + "ear chers", + "Ġd ot", + "leg raph", + "ast ically", + "Ġupgr ades", + "Ġca res", + "Ġext ending", + "Ġfree ze", + "Ġin ability", + "Ġorg ans", + "Ġpret end", + "Ġout let", + "11 3", + "ol an", + "ĠM all", + "ul ing", + "t alk", + "Ġexpress ing", + "ĠAl ways", + "ĠBe gin", + "f iles", + "Ġlic enses", + "% %", + "ĠM itt", + "Ġfil ters", + "ĠMil waukee", + "G N", + "Ġunf old", + "M o", + "Ġnut rition", + "pp o", + "B o", + "Ġfound ing", + "Ġunder mine", + "Ġeas iest", + "ĠC zech", + "ĠM ack", + "Ġsexual ity", + "ĠN ixon", + "W in", + "ĠAr n", + "ĠK in", + "ãĤ £", + "ic er", + "Ġfort un", + "Ġsurf aces", + "agh d", + "Ġcar riers", + "ĠP ART", + "ĠT ib", + "Ġinter val", + "Ġfrust rating", + "ĠSh ip", + "ĠAr med", + "ff e", + "Ġbo ats", + "ĠAb raham", + "in is", + "Ġsu ited", + "th read", + "i ov", + "ab ul", + "ĠVenezuel a", + "Ġto m", + "su per", + "Ġcast le", + "alth ough", + "iox ide", + "ec hes", + "Ġevolution ary", + "Ġnegoti ate", + "Ġconfront ed", + "Rem ember", + "Ġ17 0", + "S uch", + "Ġ9 11", + "m ult", + "ĠA byss", + "ur ry", + "ke es", + "spe c", + "ĠBarb ara", + "Ġbelong ing", + "Ġvill ain", + "ist ani", + "Ġaccount able", + "Ġport ions", + "ĠDe cl", + "U r", + "ĠK ate", + "g re", + "Ġmag azines", + "UC K", + "Ġregul ate", + "om on", + "ĠAl most", + "Ġover view", + "Ġsc ram", + "Ġl oot", + "ĠF itz", + "Ġcharacter istic", + "ĠSn ake", + "s ay", + "ĠR ico", + "Ġtra it", + "ĠJo ined", + "au cus", + "Ġadapt ation", + "ĠAirl ines", + "Ġarch ae", + "ĠI de", + "Ġb ikes", + "Ġliter ary", + "Ġinflu ences", + "ĠUs ed", + "C reat", + "Ġple a", + "ĠDef ence", + "ĠAss ass", + "Ġp ond", + "UL T", + ") \"", + "Ġeval uated", + "Ġob taining", + "Ġdem ographic", + "Ġvig il", + "ale y", + "Ġsp ouse", + "ĠSeah awks", + "resp ons", + "ĠB elt", + "um atic", + "Ġr ises", + "run ner", + "ĠMichel le", + "Ġpot ent", + "r ace", + "ĠP AC", + "F ind", + "olester ol", + "IS S", + "ĠIntrodu ced", + "ress es", + "ign ment", + "O s", + "ĠT u", + "ĠDe x", + "ic ides", + "Ġspark ed", + "ĠLaur a", + "ĠBry ant", + "Ġsm iling", + "ĠNex us", + "Ġdefend ants", + "ĠCat al", + "Ġdis hes", + "sh aped", + "Ġpro long", + "m t", + "( $", + "ãĢ Ĥ", + "Ġcalcul ations", + "ĠS ame", + "Ġp iv", + "H H", + "Ġcance lled", + "Ġgr in", + "Ġterrit ories", + "ist ically", + "C ome", + "ĠP arent", + "Pro ject", + "Ġneg lig", + "ĠPriv acy", + "Ġam mo", + "LE CT", + "olute ly", + "ĠEp ic", + "Ġmis under", + "w al", + "Apr il", + "m os", + "path y", + "ĠC arson", + "Ġalbum s", + "ĠE asy", + "Ġpist ol", + "< <", + "Ġ\\ (", + "t arget", + "hel p", + "Ġinter pre", + "cons cious", + "ĠH ousing", + "ĠJ oint", + "12 7", + "Ġbe ers", + "s cience", + "ĠFire fox", + "effect ive", + "ĠC abin", + "ĠO kay", + "ĠApp lic", + "Ġspace craft", + "ĠS R", + "ve t", + "ĠStr ange", + "S B", + "Ġcor ps", + "iber al", + "e fficient", + "Ġpreval ence", + "Ġeconom ists", + "11 8", + "Th read", + "ord able", + "OD E", + "ĠC ant", + "=- =-", + "if iable", + "ĠA round", + "Ġpo le", + "Ġwilling ness", + "CL A", + "ĠK id", + "Ġcomple ment", + "Ġsc attered", + "Ġin mates", + "Ġble eding", + "e very", + "Ġque ue", + "ĠTr ain", + "Ġh ij", + "Ġme lee", + "ple ted", + "Ġdig it", + "Ġg em", + "offic ial", + "Ġlif ting", + "Ð µ", + "Re 
qu", + "it utes", + "Ġpack aging", + "ĠWork ers", + "h ran", + "ĠLeban on", + "ol esc", + "Ġpun ished", + "ĠJ uan", + "Ġj am", + "ĠD ocument", + "Ġm apping", + "ic ates", + "Ġinev itably", + "Ġvan illa", + "ĠT on", + "Ġwat ches", + "Ġle agues", + "Ġiniti ated", + "deg ree", + "port ion", + "Ġrec alls", + "Ġru in", + "Ġm elt", + "I AN", + "Ġhe m", + "Ex p", + "Ġb aking", + "ĠCol omb", + "at ible", + "Ġrad ius", + "pl ug", + "ĠI F", + "et ically", + "Ġf ict", + "H ER", + "ĠT ap", + "atin um", + "Ġin k", + "Ġco h", + "ĠW izard", + "b oth", + "te x", + "Ġsp ends", + "ĠCurrent ly", + "ĠP it", + "Ġneur ons", + "ig nt", + "Ġr all", + "Ġbus es", + "b uilding", + "Ġadjust ments", + "Ġc ried", + "ibl ical", + "att ed", + "ĠZ ion", + "ĠM atter", + "Ġmed itation", + "ĠD ennis", + "Ġour s", + "ĠT ab", + "Ġrank ings", + "ort al", + "Ġad vers", + "Ġsur render", + "ĠG ob", + "ci um", + "om as", + "im eter", + "Ġmulti player", + "Ġhero in", + "Ġoptim istic", + "Ġindic ator", + "ĠBr ig", + "Ġgro cery", + "Ġapplic ant", + "ĠRock et", + "v id", + "Ex ception", + "p ent", + "Ġorgan izing", + "Ġenc ounters", + "ĠT OD", + "Ġjew el", + "S ave", + "ĠChrist ie", + "Ġhe ating", + "Ġl azy", + "ĠC P", + "Ġcous in", + "Con fig", + "Ġreg ener", + "Ġne arest", + "Ġachie ving", + "EN S", + "th row", + "ĠRich mond", + "ant le", + "200 2", + "Ġan ten", + "b ird", + "13 3", + "Ġn arc", + "r aint", + "un ny", + "ĠHispan ic", + "ourn aments", + "Ġprop he", + "ĠTh ailand", + "ĠT i", + "Ġinject ion", + "Ġinher it", + "rav is", + "Ġmed i", + "Ġwho ever", + "ĠDE BUG", + "G P", + "ĠH ud", + "C ard", + "p rom", + "Ġp or", + "Ġover head", + "L aw", + "Ġviol ate", + "Ġhe ated", + "Ġdescript ions", + "Ġachieve ments", + "ĠBe er", + "ĠQu ant", + "W as", + "Ġe ighth", + "ĠI v", + "Ġspecial ized", + "U PDATE", + "ĠD elta", + "P op", + "J ul", + "ĠAs k", + "oph y", + "Ġnews letters", + "ĠT ool", + "Ġg ard", + "ĠConf eder", + "ĠGM T", + "ĠAb bott", + "Ġimm unity", + "ĠV M", + "Is lam", + "Ġimpl icit", + "w d", + "Ġ19 44", + "rav ity", + "omet ric", + "Ġsurv iving", + "ur ai", + "ĠPr ison", + "Ġr ust", + "ĠSk etch", + "Ġbe es", + "ĠThe ory", + "Ġmer it", + "T ex", + "ch at", + "Ġm im", + "Ġpast e", + "ĠK och", + "Ġignor ance", + "ĠSh oot", + "Ġbas ement", + "Un ited", + "ĠAd vis", + "he ight", + "Ġf oster", + "Ġdet ain", + "in formation", + "Ġne ural", + "' ;", + "Ġprov es", + "all ery", + "Ġinv itation", + "um bers", + "Ġc attle", + "Ġbicy cle", + "z i", + "Ġconsult ant", + "Ġap ology", + "ĠT iger", + "Ġ12 3", + "99 9", + "Ġind ividually", + "r t", + "ig ion", + "ĠBrazil ian", + "Ġdist urb", + "Ġentreprene urs", + "Ġfore sts", + "cer pt", + "pl ates", + "p her", + "clip se", + "Ġtw itter", + "Ġac ids", + "ograph ical", + "h um", + "ĠB ald", + "if ully", + "Ġcomp iler", + "ĠD A", + "Ġdon or", + "as i", + "Ġtrib al", + "l ash", + "ĠCon fig", + "Ġapplic ants", + "Ġsal aries", + "13 5", + "Put in", + "ĠF ocus", + "ir s", + "Ġmisc onduct", + "ĠH az", + "Ġeat en", + "M obile", + "Mus lim", + "ĠMar cus", + "v iol", + "Ġfavor able", + "Ġst ub", + "ad in", + "ĠH ob", + "Ġfaith ful", + "Ġelectron ics", + "Ġvac uum", + "w ait", + "back ed", + "econom ic", + "d ist", + "Ġten ure", + "Ġsince re", + "ĠT ogether", + "ĠW ave", + "Ġprog ression", + "Ġden ying", + "Ġdist ress", + "br aska", + "th ird", + "Ġmix ing", + "Ġcolon ial", + "Ġpriv ately", + "Ġun rest", + "atern ity", + "Ġprem ises", + "ant i", + "greg ation", + "Ġlic ence", + "ĠH ind", + "ĠSam uel", + "Ġconvinc ing", + "ĠA ce", + "ĠR ust", + "ĠNet anyahu", + "Ġhand les", + "ĠP atch", + "orient ed", 
+ "ah o", + "ĠG onz", + "Ġhack ers", + "claim er", + "Ġcustom s", + "ĠGr an", + "f ighters", + "Ġl uc", + "Ġman uscript", + "aren thood", + "Ġdev il", + "Ġwar riors", + "Ġoff enders", + "Will iam", + "Ġhol idays", + "Ġnight mare", + "Ġle ver", + "iff erent", + "St at", + "Ġexhib ition", + "put ed", + "ĠP ure", + "Ġal pha", + "Ġenthus iasm", + "ĠRepresent atives", + "E AR", + "ĠT yp", + "Ġwhe at", + "ĠAl f", + "Ġcor rection", + "Ġev angel", + "AT T", + "M iss", + "Ġs oup", + "Ġimpl ied", + "par am", + "Ġsex y", + "ĠL ux", + "Ġrep ublic", + "p atch", + "ab lish", + "Ġic ons", + "Ġfather s", + "ĠG ET", + "ĠCar ib", + "Ġregul ated", + "ĠCo hen", + "ĠBob by", + "Ġn er", + "Ġb ent", + "vent ory", + "ĠAl ong", + "ĠE ST", + "ĠWall ace", + "Ġmurd ers", + "r ise", + "ke ll", + "ĠCommon wealth", + "Ġn asty", + "et a", + "ĠM IT", + "Ġadminist ered", + "Ġgenuine ly", + "Ed itor", + "n ick", + "Ġhyd ro", + "**************** ****************", + "ĠB le", + "Ġfin es", + "Ġg orge", + "aus ible", + "r h", + "Ġapp le", + "ment ioned", + "Ġro pe", + "ot yp", + "H R", + "Ġdisappoint ing", + "Ġc age", + "n ik", + "Ġdoub ts", + "ĠF REE", + "print s", + "ĠM UST", + "Ġvend ors", + "ĠIn qu", + "Ġliber als", + "Ġcontract or", + "Ġup side", + "child ren", + "Ġtrick y", + "Ġregul ators", + "charg ed", + "l iter", + "Ġ ***", + "Ġreb ell", + "l ang", + "Ġloc als", + "Ġphys icians", + "Ġhe y", + "ar se", + "t m", + "ĠLe x", + "Ġbehavior al", + "success ful", + "F X", + "Ġbr ick", + "ov ic", + "Ġcon form", + "Ġreview ing", + "Ġins ights", + "Ġbi ology", + "ĠRem ove", + "ĠExt ra", + "Ġcomm itting", + "indu ced", + "ignt y", + "ig m", + "Ġat omic", + "Comm on", + "ĠE M", + "ĠP ere", + "ĠIt ems", + "e h", + "Ġpres erved", + "ĠH ood", + "Ġprison er", + "Ġbankrupt cy", + "Ġg ren", + "us hes", + "Ġexplo itation", + "Ġsign atures", + "Ġfin an", + "] ,\"", + "ĠM R", + "Ġme g", + "rem lin", + "Ġmusic ians", + "Ġselect ing", + "Ġexam ining", + "IN K", + "l ated", + "H i", + "Ġart ic", + "Ġp ets", + "Ġimp air", + "ĠM AN", + "Ġtable ts", + "in clude", + "R ange", + "Ġca ut", + "Ġlog s", + "Ġmount ing", + "Ġun aware", + "Ġdynam ics", + "ĠPalest ine", + "ĠQu arter", + "ĠPur ple", + "Ġm a", + "ĠIm port", + "Ġcollect ions", + "ci ation", + "Ġsuccess or", + "Ġcl one", + "Ġaim ing", + "Ġposs essed", + "Ġstick ing", + "Ġsh aking", + "Ġloc ate", + "ĠH ockey", + "T urn", + "17 0", + "Ġfif teen", + "ĠHar rison", + "Ġcontinu ously", + "ĠT C", + "ĠVal ent", + "ĠRes cue", + "Ġby pass", + "am ount", + "Ġm ast", + "Ġprotect s", + "Ġart istic", + "Ġsomet ime", + "Ġsh oe", + "Ġshout ed", + "ific ant", + "et itive", + "ĠReg ister", + "ĠJ in", + "Ġconcent rated", + "ling ton", + "on ies", + "Ġgener ator", + "yr im", + "ĠAr men", + "Ġclear ing", + "id o", + "ĠT W", + "al ph", + "Ġlad ies", + "H ard", + "Ġdial og", + "Ġinput s", + "æ ľ", + "Ġpos es", + "Ġsl ots", + "ĠPrem ium", + "Ġle aks", + "Ġboss es", + "Ġ11 3", + "c ourse", + "A cc", + "ĠNew ton", + "ĠAust ria", + "ĠM age", + "Ġte aches", + "ab ad", + "Ġwe ars", + "Ġc yl", + "Ġcur se", + "ĠS ales", + "ĠW ings", + "Ġp sy", + "Ġg aps", + "ĠIce land", + "ĠP interest", + "Ġland lord", + "Ġdefin itions", + "ĠK er", + "Ġsufficient ly", + "ĠP ence", + "ĠArch itect", + "Ġsur pass", + "Ġ11 4", + "Ġsuper hero", + "ĠDise ase", + "Ġpri ests", + "ĠC ulture", + "Ġdefin itive", + "Ġsecret ly", + "ĠD ance", + "inst all", + "ch ief", + "ĠJess ica", + "W ould", + "Up dated", + "Ġlock er", + "ĠK ay", + "Ġmem orial", + "è ¦", + "f at", + "Ġdis gu", + "Ġflav ors", + "ĠBase ball", + "ĠRes istance", + "Ġk icks", + "Ġen v", 
+ "Ġteen agers", + "D ark", + "ĠC AR", + "Ġh alt", + "ĠL G", + "ĠGab riel", + "Ġfe ver", + "Ġs atur", + "Ġm all", + "Ġaffili ate", + "ĠS leep", + "ĠSpe cific", + "ĠV el", + "Ġj ar", + "ĠSac red", + "ĠEd wards", + "ĠA CL", + "Ġret ained", + "ĠG iant", + "Ġlim itation", + "in ces", + "Ġref usal", + "ĠT ale", + "ĠBut ler", + "Ġacc idents", + "ĠC SS", + "Ġimport ed", + "ĠCop y", + "Î ±", + "ER T", + "z el", + "Ġdiv isions", + "h ots", + "ĠAl b", + "ĠD S", + "Load er", + "W ashington", + "at isf", + "ĠCreat ive", + "\\ .", + "ĠAut om", + "red ict", + "Ġrecept or", + "ĠCarl os", + "Met hod", + "ok a", + "Ġmal icious", + "Ġste pping", + ", [", + "ĠD ad", + "Ġatt raction", + "ĠEffect s", + "ĠPir ate", + "ĠC er", + "ĠIndust ry", + "ĠR ud", + "Ġchar ter", + "Ġd ining", + "Ġins ists", + "Ġconfig ure", + "Ġ( #", + "ĠSim ple", + "ĠSc roll", + "UT C", + "17 5", + "ĠK on", + "Ġmarket place", + "Ġ ãĤ", + "Ġref res", + "Ġg ates", + "er red", + "ĠP od", + "Ġbeh ave", + "Fr ank", + "n ode", + "Ġendors ed", + "he tt", + "as ive", + "ĠHom eland", + "Ġr ides", + "ĠLe ave", + "er ness", + "Ġflood ing", + "A FP", + "Ġris en", + "Ġcontin ually", + "Ġun anim", + "ĠCont ract", + "ĠP as", + "Ġgu ided", + "ĠCh ile", + "b d", + "Ġsu cc", + "pt ic", + "Ġcomm ittees", + "ĠL uther", + "ĠAny one", + "Ġs ab", + "12 4", + "Ġp ixel", + "ĠB ak", + "ĠT ag", + "ĠBenn ett", + "En ter", + "sm all", + "ĠPresident ial", + "Ġp ul", + "Ġcontr ace", + "arch ive", + "Ġcoast al", + "ĠK ids", + "19 2", + "âĢ ²", + "ick y", + "ING TON", + "Ġw olf", + "ĠSt alin", + "T ur", + "id get", + "am as", + "ĠUn less", + "Ġspons or", + "Ġmor ph", + "ĠCho ose", + "Ġrun ner", + "Ġun bel", + "Ġm ud", + "ĠMan a", + "Ġdub bed", + "Ġg odd", + "ure rs", + "wind ow", + "Ġrel ied", + "Ġcelebr ating", + "os c", + "Ġ13 5", + "Ġlobb ying", + "Ġincom plete", + "Ġrestrict ion", + "Ġinc ap", + "it us", + "Ġexpect ation", + "ĠAp ollo", + "Ġint ens", + "Ġsyn c", + "G H", + "Ġmanip ulation", + "B Y", + "Ġspe ar", + "Ġbre asts", + "Ġvol can", + "il ia", + "M aterial", + "Ġform ats", + "ĠB ast", + "Ġparliament ary", + "Ġsn ake", + "Ġserv ants", + "ĠTr udeau", + "ĠGr im", + "ĠArab ic", + "ĠSC P", + "ĠBoy s", + "st ation", + "Ġprospect ive", + "ord e", + "in itialized", + "Ġb ored", + "AB LE", + "Ġaccess ed", + "Ġtax i", + "ĠShe ll", + "aid en", + "urs ed", + "in ates", + "ĠIns urance", + "ĠPet e", + "Sept ember", + "6 50", + "Ġad ventures", + "ĠCo ver", + "Ġt ribute", + "Ġsk etch", + "Ġem power", + "Ġ Ø", + "ĠGl enn", + "ĠD aw", + "= \\\"", + "ĠPolit ics", + "Ġgu ides", + "Ġd ioxide", + "ĠG ore", + "ĠBr ight", + "ĠS ierra", + "Ġval ued", + "c ond", + "Ġpo inter", + "Se lect", + "Ġrisk y", + "Ġabsor b", + "im ages", + "Ġref uses", + "Ġbon uses", + "__ _", + "Ġh ilar", + "ĠF eatures", + "2 20", + "ĠCollect or", + "F oot", + "Ġ19 64", + "cul us", + "Ġd awn", + "Ġwork out", + "ĠL O", + "Ġphilosoph ical", + "ĠSand y", + "ĠYou th", + "Ġl iable", + "A f", + "bl ue", + "Ġovert urn", + "less ness", + "ĠTrib une", + "ĠIn g", + "Ġfact ories", + "Ġcat ches", + "Ġpr one", + "Ġmat rix", + "Ġlog in", + "Ġin acc", + "Ġex ert", + "s ys", + "Ġneed le", + "ĠQ ur", + "Ġnot ified", + "ould er", + "t x", + "Ġremind s", + "Ġpublisher s", + "Ġn ort", + "Ġg it", + "Ġfl ies", + "ĠEm ily", + "Ġflow ing", + "ĠAl ien", + "ĠStr ateg", + "Ġhard est", + "Ġmod ification", + "AP I", + "ĠM Y", + "Ġcr ashes", + "st airs", + "n umber", + "Ġur ging", + "ch annel", + "ĠFal con", + "Ġinhabit ants", + "Ġterr ifying", + "Ġutil ize", + "Ġban ner", + "Ġcig arettes", + "Ġsens es", + "ĠHol mes", + "Ġpract ition", + 
"ĠPhill ips", + "ott o", + "Ġcomp ile", + "Mod el", + "ĠK o", + "Ġ[ ]", + "Americ ans", + "ĠTer ms", + "Ġmed ications", + "ĠAn a", + "Ġfundament ally", + "ĠNot ice", + "Ġwe aker", + "Ġ 0000", + "Ġgar lic", + "Ġout break", + "Ġeconom ist", + "ĠB irth", + "Ġobst acles", + "ar cer", + "ĠOr thodox", + "Ġplace bo", + "ĠC rew", + "asp berry", + "ĠAng els", + "Ġdis charge", + "Ġdestruct ive", + "11 7", + "ĠR ising", + "Ġd airy", + "l ate", + "Ġcoll ision", + "ĠTig ers", + "ean or", + "ocument ed", + "ĠIn valid", + "Ġd ont", + "ĠL iter", + "ĠV a", + "Ġhyd rogen", + "Ġvari ants", + "ĠBrown s", + "Ġ19 65", + "Ġind igenous", + "Ġtrad es", + "Ġremain der", + "Ġswe pt", + "ĠImp act", + "Ġred ist", + "Ġun int", + "grad uate", + "ãĥ ķ", + "ĠW ILL", + "ãģ® ç", + "ĠCrit ical", + "Ġf isher", + "Ġv icious", + "Ġrevers ed", + "Y ear", + "ĠS ox", + "Ġshoot ings", + "Ġfil ming", + "Ġtouchdown s", + "ai res", + "m el", + "Ġgrand father", + "Ġaffect ion", + "ing le", + "Ġover ly", + "Add itional", + "Ġsup reme", + "ĠGr ad", + "Ġsport ing", + "Ġmer cy", + "ĠBrook s", + "ount y", + "Ġperform s", + "Ġtight ly", + "Ġdem ons", + "Ġkill ings", + "Ġfact ion", + "ĠNov a", + "aut s", + "Ġund oubtedly", + "ar in", + "Ġunder way", + "ra k", + "Ġl iv", + "ĠReg ion", + "Ġbrief ing", + "s ers", + "cl oud", + "ĠM ik", + "us p", + "Ġpred iction", + "az or", + "Ġport able", + "ĠG and", + "Ġpresent ing", + "Ġ10 80", + " »", + "ush i", + "ĠSp ark", + "there um", + "Ġjust ification", + "ĠN y", + "Ġcontract ors", + "ming ham", + "ĠSt yle", + "å ħ", + "ĠChron icles", + "ĠPict ure", + "Ġprov ing", + "Ġw ives", + "set t", + "Ġmole cules", + "ĠFair y", + "Ġconsist ing", + "Ġp ier", + "al one", + "in ition", + "Ġn ucle", + "j son", + "Ġg otta", + "Ġmob il", + "Ġver bal", + "ar ium", + "Ġmon ument", + "uck ed", + "Ġ25 6", + "T ech", + "mine craft", + "ĠTr ack", + "Ġt ile", + "Ġcompat ibility", + "as is", + "Ġs add", + "Ġinstruct ed", + "ĠM ueller", + "Ġle thal", + "Ġhorm one", + "Ġor che", + "el se", + "Ġske let", + "Ġentert aining", + "Ġminim ize", + "ag ain", + "Ġunder go", + "Ġconst raints", + "Ġcig arette", + "ĠIslam ist", + "Ġtravel s", + "ĠPant hers", + "l ings", + "C are", + "Ġlaw suits", + "ur as", + "Ġcry st", + "Ġlow ered", + "Ġaer ial", + "Ġcomb inations", + "Ġha un", + "Ġch a", + "Ġv ine", + "Ġquant ities", + "Ġlink ing", + "b ank", + "Ġso y", + "B ill", + "ĠAngel a", + "Ġrecip ient", + "ĠProt est", + "Ġs ocket", + "Ġsolid arity", + "Ġâ Ĩ", + "m ill", + "Ġvar ies", + "ĠPak istani", + "Dr agon", + "Ġun e", + "Ġhor izon", + "³³³³ ³³³³", + "Ġprov inces", + "Ġfrank ly", + "Ġenact ed", + "not es", + "[ '", + "Ġ19 2", + "ocr acy", + "Ġendorse ment", + "Ġover time", + "Tr ue", + "L ab", + "lic ted", + "ĠD NC", + "Ġbe ats", + "ĠJam ie", + "15 2", + "ĠIN T", + "Cont act", + "Ġaccount ed", + "h ash", + "ĠPack ers", + "p ires", + "Ġles bian", + "Ġamend ments", + "Ġhop eful", + "ĠFin land", + "Ġspot light", + "Ġconfig ured", + "Ġtrou bled", + "Ġg aze", + "ĠCal gary", + "Ġrel iability", + "Ġins urg", + "sw er", + "b uy", + "ĠSk in", + "Ġp ixels", + "Ġhand gun", + "Ġpar as", + "Ġcateg or", + "ĠE L", + "ĠRe x", + "Ind eed", + "Ġkind a", + "Ġconj unction", + "ĠBry an", + "ĠMan ufact", + "y ang", + "Pl us", + "S QL", + "ish ment", + "Ġdom inate", + "Ġn ail", + "Ġo ath", + "Ġeru pt", + "ĠF ine", + "it bart", + "ĠCh ip", + "ĠAb d", + "ĠN am", + "Ġbuy er", + "Ġdiss ent", + "Le aks", + "Cont in", + "Ġr ider", + "ĠSome one", + "Ġill usion", + "c in", + "ĠBoe ing", + "Ġin adequ", + "ov ation", + "i ants", + "Ġreb uild", + "4 50", + "ĠDest iny", + "S 
W", + "ĠT ill", + "H it", + "ia z", + "ĠBang l", + "acher s", + "ĠRe form", + "Ġse gments", + "Ġsystem atic", + "d c", + "ĠConserv atives", + "Ġport al", + "h or", + "ĠDragon bound", + "Ġdrag ged", + "om o", + "Ġthe e", + "ad vert", + "ĠRep orts", + "ĠE t", + "Ġbarrel s", + "Aug ust", + "Ġcompar isons", + "Ġhe x", + "Ġan throp", + "\" [", + "bor ough", + "ab i", + "Ġpict ured", + "play ing", + "ĠAdd ress", + "ĠMir ror", + "Sm ith", + "Ġt ires", + "ĠN PR", + "AA AA", + "Ġclass ification", + "ĠTh an", + "ĠH arm", + "ĠR A", + "Ġreject ion", + "min ation", + "Ġr anged", + "ĠF alls", + "D I", + "H ost", + "ãĤ ´", + "ĠEx ample", + "list ed", + "th irds", + "Ġsaf egu", + "br and", + "Ġprob able", + "Can ada", + "IT ION", + "ĠQ aeda", + "Ġch ick", + "Ġimport s", + "h it", + "l oc", + "W W", + "Ġble w", + "Ġany time", + "Ġwh oles", + "ik ed", + "Ġcal culation", + "cre ate", + "ĠO ri", + "Ġupgr aded", + "Ġapp ar", + "ut ory", + "ĠM ol", + "B rit", + "ĠJ ong", + "IN AL", + "ĠStart ing", + "Ġd ice", + "urt le", + "Ġre lying", + "cl osure", + "Ġprof itable", + "Ġsl aughter", + "ĠMan ual", + "c aster", + "Ġ\" $", + "Ġfe ather", + "ĠSim ply", + "ie ves", + "Ġdeter ior", + "ĠPC I", + "Ġst amp", + "Ġfl aws", + "Ġsh ade", + "ham mer", + "Ġpass port", + "Ġcont ing", + "am el", + "Ġobser vers", + "Ġneg lect", + "ĠR B", + "ĠBrother hood", + "Ġskept ical", + "f amily", + "us k", + "Ġemotion ally", + "â Ļ", + "ĠBet a", + "ason able", + "id ity", + "ĠM ul", + "Ġkick ing", + "ĠC arm", + "oll ah", + "VERT IS", + "ĠAt hen", + "Ġlad der", + "ĠBul let", + "å £", + "00 01", + "ĠWild life", + "ĠM ask", + "ĠN an", + "R ev", + "Ġun acceptable", + "leg al", + "Ġcrowd ed", + "ag i", + "ĠC ox", + "j e", + "Ġmor ality", + "Ġfu els", + "Ġc ables", + "Ġman kind", + "ĠCarib bean", + "Ġanch or", + "Ġby te", + "ĠO ften", + "ĠO z", + "Ġcraft ed", + "Ġhistor ian", + "ĠW u", + "Ġtow ers", + "ĠCitiz ens", + "Ġhel m", + "Ġcred entials", + "Ġsing ular", + "ĠJes se", + "Ġtack les", + "Ġcont empt", + "Ġa fore", + "ĠSh adows", + "Ġn il", + "Ġur gent", + "app le", + "bl ood", + "Ġv on", + "Ġoff line", + "Ġbreat he", + "Ġj umps", + "Ġirre levant", + "ox ic", + "om al", + "import ant", + "J im", + "Ġgl oves", + "arm ing", + "dep th", + "Ġtal ents", + "ook ie", + "ĠS B", + "Ġpal m", + "uff s", + "est a", + "IG H", + "Ġcan on", + "ĠVer izon", + "ĠP le", + "Ġcou pled", + "vel t", + "Ġfundra ising", + "ĠGet ting", + "ĠD LC", + "Ġmathemat ical", + "ĠH S", + "ĠCard inals", + "te lling", + "Ġspons ors", + "Ġ Ï", + "ĠBull s", + "op tion", + "Ġprop ose", + "Ġmem orable", + "Ġembr aced", + "Ġdecl ining", + "He alth", + "ed a", + "Ġ} ;", + "Ġsp am", + "m ile", + "Ġpit cher", + "ĠE ight", + "Ġcar ing", + "ut ic", + "ro le", + "Ġair line", + "ernand ez", + "ĠAth let", + "Ġcert ification", + "ux e", + "rig er", + "Ġem pir", + "Ġsens ation", + "Ġdis m", + "Ġb olt", + "Ġev olve", + "H ouse", + "Ġconsult ation", + "ĠD uty", + "Ġtou ches", + "ĠN athan", + "Ġf aint", + "h ad", + "\" (", + "ĠCons umer", + "ĠExt reme", + "Ġ12 7", + "ĠHer m", + "ĠSac rament", + "iz oph", + "Ġanx ious", + "ul ously", + "Ġsoc ially", + "ĠU TC", + "Ġsol ving", + "ĠLet ter", + "Hist ory", + "ed uc", + "Pr ice", + ") );", + "Ġrel oad", + "am ic", + "Ġp ork", + "Ġdisc ourse", + "Ġt ournaments", + "ai ro", + "ĠK ur", + "ĠCost a", + "Ġviol ating", + "Ġinterf ere", + "Ġrecre ational", + "uff le", + "Ġspe eches", + "Ġneed ing", + "Ġremem bers", + "Ġcred ited", + "n ia", + "f ocused", + "amer a", + "Ġb ru", + "um bs", + "ĠCub an", + "Ġpreced ing", + "Ġnons ense", + "ac ial", + "Ġsmart 
phones", + "ĠSt ories", + "S ports", + "ĠEmer gency", + "oun cing", + "ef ined", + "Ġb er", + "Ġconsult ing", + "Ġm asters", + "he astern", + ".\" [", + "ĠRun ning", + "Ġsus cept", + "ĠF eng", + "Americ a", + "pr ises", + "st itial", + "ĠWeek ly", + "ĠGreat er", + "mod ules", + "if ter", + "G raphics", + "ul er", + "Ġwho lly", + "Ġsupp ress", + "Ġconce aled", + "Ġhapp ily", + "Ġaccept s", + "ĠEn joy", + "Ġr ivers", + "ĠEx cept", + "2 25", + "ĠN HS", + "ĠMc Connell", + "Ġp ussy", + "fer red", + "ut able", + "Ġatt ain", + "Ġ> =", + "Ġdepos its", + "roph ic", + "Ġnot orious", + "ĠSh aw", + "il itation", + "Ġepid emic", + "all ic", + "Ġsmall est", + "ov ich", + "Ġaccess ories", + "per ties", + "Ġsur plus", + "ĠMe ch", + "Ġamb ig", + "ĠImm igration", + "Ġch im", + "ev al", + "Ġpract icing", + "ĠMyster y", + "Ġdom ains", + "ĠSil icon", + "app s", + "Ġkilomet ers", + "e a", + "ĠSm ash", + "Ġwarrant y", + "Ġn ost", + "s il", + "re v", + "J on", + "ĠDub lin", + "Ġtast es", + "Ġb out", + "g reat", + "er ror", + "Ġsw itches", + "ĠB apt", + "D O", + "ok i", + "Ġsour ced", + "pro du", + "Ġattach ment", + "ĠIss ue", + "ĠQuest ion", + "Jo in", + "Ġf itted", + "Ġunlaw ful", + "^ ^", + "ere k", + "Ġauthent ication", + "Ġst ole", + "Ġaccount ability", + "l abel", + "S earch", + "Ġal beit", + "atic an", + "fund ed", + "ĠAdd ing", + "ĠI Q", + "Ġsub mar", + "l it", + "a que", + "ĠLear ning", + "Ġint eger", + "M aster", + "ĠCh rom", + "Ġprem ier", + "O p", + "ĠLi u", + "Ġbl essed", + "ĠGl obe", + "ĠResp onse", + "Ġlegit im", + "ĠMer kel", + "Ġdispos al", + " ´", + "Ġgau ge", + "pe at", + "Ġindu ced", + "Ġquestion able", + "arth y", + "ĠV it", + "ĠF eed", + "U ntil", + "U t", + "worth y", + "R Y", + "ĠH erald", + "ĠHam mer", + "Ġmed al", + "ĠR ivers", + "ĠH ack", + "Ġclar ify", + "Ġtrack ed", + "Ġautonom ous", + "Ġten ant", + "ĠQ atar", + "er ie", + "Ġgr im", + "ĠMon itor", + "Ġresist ant", + "ĠSpe c", + "ĠWell s", + "N AS", + "14 8", + "Ġmin ers", + "iot ics", + "Ġmiss es", + "11 6", + "g ian", + "g it", + "ĠE yes", + "p res", + "Ġgrad uated", + "Ġang el", + "Ġsyn chron", + "Ġefficient ly", + "Ġtrans mitted", + "H arry", + "Ġglob ally", + "EN CE", + "ĠMont ana", + "r aged", + "ĠPre vention", + "Ġp iss", + "ĠL l", + "Ġshe lf", + "ĠB JP", + "ĠTest ament", + "ĠL ate", + "ik er", + "ĠH app", + "ĠJul ian", + "h all", + "Ġsp ont", + "Ġshut down", + "Ġincons istent", + "Ġsubscrib ers", + "Ġske leton", + "ĠNe braska", + "Ġins pire", + "ĠV oid", + "F eed", + "Ġang les", + "ĠSpr ings", + "Ġbench mark", + "Ġvacc ines", + "izoph ren", + "se xual", + "uff ed", + "Ġsh ine", + "ĠK ath", + "Ġgest ure", + "ine a", + "Ġr ip", + "Ġopp ression", + "Ġcons cience", + "b t", + "ĠL um", + "Ġinc idence", + "ĠF a", + "w r", + "Ġmin eral", + "ĠSp urs", + "alk y", + "Ġth under", + "Ġop io", + "Be ing", + "ĠPal m", + "Ġwas ted", + "Ġl b", + "i aries", + "ĠIniti ative", + "Ġcur ric", + "Ġmark er", + "ĠMc L", + "Ġext ensions", + "ĠP v", + "ĠAr ms", + "Ġoffer ings", + "Ġdef enses", + "Ġvend or", + "Ġcontrad ict", + "ĠCol in", + "Ġredd it", + "Ġper ipher", + "12 2", + "Ġs ins", + "E dit", + "IC T", + "So ft", + "ĠSh ah", + "Ġadministr ator", + "ĠT rip", + "Ġporn ography", + "Ġtu ition", + "in ence", + "ĠPro gress", + "Ġcat alog", + "Ġsu ite", + "Ġh ike", + "Ġreprodu ctive", + "eng ine", + "Ġd rought", + "ĠNo ah", + "Ġ2 30", + "Ġd ude", + "Ġrelax ed", + "Ġpart ition", + "Ġparticip ant", + "Ġtel esc", + "Ġfe as", + "ĠF F", + "own er", + "Ġswe eping", + "Ġl enses", + "Ġmatch up", + "ĠRe pl", + "ourn als", + "Ġcred ible", + "Ġgrand mother", + 
"Ġther mal", + "Ġsubscrib ing", + "Ġident ities", + "col m", + "U CT", + "Ġreluct ant", + "us ers", + "ĠC ort", + "Ġassist ed", + "OS S", + "ATION S", + "IS H", + "Ġpharm aceutical", + "ic able", + "ad ian", + "ĠSon ic", + "ĠF ury", + "ĠM ong", + "A H", + "ĠPsych ology", + "Ġph osph", + "Ġtreat s", + "Ń Ķ", + "Ġstead ily", + "ĠHell o", + "Ġrel ates", + "Ġcl ue", + "Ex pl", + "a uth", + "Ġrev ision", + "Ġe ld", + "os ion", + "Ġbr on", + "14 4", + "ri kes", + "Ġmin es", + "Ġblank et", + "ĠF ail", + "el ed", + "ĠIm agine", + "ĠPl anned", + "a ic", + "Re quest", + "M ad", + "ĠHor se", + "ĠEag le", + "Ġcap ac", + "15 7", + "Ġl ing", + "ĠN ice", + "ĠP arenthood", + "min ster", + "og s", + "ens itive", + "Not hing", + "Ġcar n", + "F in", + "ĠP E", + "Ġr ifles", + "ĠL P", + "S and", + "Ġgui Active", + "Ġtour ist", + "C NN", + "Ġunve iled", + "Ġpredec essor", + "} {", + "u ber", + "Ġoff shore", + "Ġopt ical", + "ĠR ot", + "ĠPear l", + "et on", + "Ġst ared", + "Ġfart her", + "at ility", + "cont in", + "ĠG y", + "ĠF oster", + "ĠC oc", + "ri ents", + "Ġdesign ing", + "ĠEconom y", + "ON G", + "W omen", + "ĠN ancy", + "er ver", + "Ġmas cul", + "Ġcasual ties", + "Ġ2 25", + "ĠS ullivan", + "ĠCh oice", + "Ġa ster", + "w s", + "Ġhot els", + "Ġconsider ations", + "Ġcou ch", + "ĠSt rip", + "ĠG n", + "Ġmanip ulate", + "l ied", + "Ġsynt hetic", + "Ġassault ed", + "Ġoff enses", + "ĠDra ke", + "Ġim pe", + "Oct ober", + "ĠHer itage", + "h l", + "ĠBl air", + "Un like", + "Ġg rief", + "Ġ4 50", + "Ġopt ed", + "Ġresign ation", + "il o", + "Ġver se", + "ĠT omb", + "Ġu pt", + "Ġa ired", + "ĠH ook", + "ĠML B", + "Ġassum es", + "out ed", + "ĠV ers", + "Ġinfer ior", + "Ġbund le", + "ĠD NS", + "ograp her", + "Ġmult ip", + "ĠSoul s", + "Ġillust rated", + "Ġtact ic", + "Ġdress ing", + "Ġdu o", + "Con f", + "Ġrel ent", + "Ġc ant", + "Ġscar ce", + "Ġcand y", + "ĠC F", + "Ġaffili ated", + "Ġspr int", + "yl an", + "ĠGarc ia", + "Ġj unk", + "Pr int", + "ex ec", + "C rit", + "Ġport rait", + "ir ies", + "ĠOF F", + "Ġdisp utes", + "W R", + "L ove", + "ãģ Ħ", + "ĠRe yn", + "Ġh ipp", + "op ath", + "Ġflo ors", + "ĠFe el", + "Ġwor ries", + "Ġsett lements", + "ĠP os", + "Ġmos que", + "Ġfin als", + "Ġcr ushed", + "ĠPro bably", + "ĠB ot", + "ĠM ans", + "ĠPer iod", + "Ġsovere ignty", + "Ġsell er", + "Ġap ost", + "Ġam ateur", + "Ġd orm", + "Ġconsum ing", + "Ġarm our", + "ĠRo ose", + "Ġint ensive", + "Ġelim inating", + "ĠSun ni", + "ĠAle ppo", + "j in", + "Ġadv ise", + "p al", + "ĠH alo", + "Ġdes cent", + "Ġsimpl er", + "Ġbo oth", + "ST R", + "L ater", + "ĠC ave", + "== =", + "Ġm ol", + "Ġf ist", + "Ġshot gun", + "su pp", + "Ġrob bery", + "E ffect", + "Ġobsc ure", + "ĠProf essional", + "Ġemb assy", + "Ġmilit ant", + "Ġinc arcer", + "Ġgener ates", + "Ġlaun ches", + "Ġadministr ators", + "Ġsh aft", + "Ġcirc ular", + "Ġfresh man", + "ĠW es", + "ĠJo el", + "ĠD rew", + "ĠDun can", + "ĠApp arently", + "s ight", + "ĠIntern al", + "ĠInd ividual", + "ĠF E", + "Ġb ore", + "ĠM t", + "Ġbroad ly", + "ĠO ptions", + "ount ain", + "ip es", + "ĠV ideos", + "20 4", + "Ġh ills", + "Ġsim ulation", + "Ġdisappoint ment", + "it an", + "ĠLabor atory", + "Ġup ward", + "Ġbound ary", + "Ġdark er", + "h art", + "Ġdomin ance", + "C ong", + "ĠOr acle", + "ĠL ords", + "Ġscholars hip", + "ĠVin cent", + "ed e", + "ĠR ah", + "Ġencour ages", + "ro v", + "Ġqu o", + "Ġprem ise", + "ĠCris is", + "ĠHol ocaust", + "Ġrhyth m", + "Ġmet ric", + "cl ub", + "Ġtransport ed", + "Ġn od", + "ĠP ist", + "Ġancest ors", + "ĠFred er", + "th umbnails", + "ĠC E", + "ON D", + "Ph il", + "ven ge", + 
"ĠProduct s", + "cast le", + "Ġqual ifying", + "ĠK aren", + "VERTIS EMENT", + "Ġmight y", + "Ġexplan ations", + "Ġfix ing", + "D i", + "Ġdecl aring", + "Ġanonym ity", + "Ġju ven", + "ĠN ord", + "ĠDo om", + "ĠAct ually", + "O k", + "ph is", + "ĠDes ert", + "Ġ11 6", + "I K", + "ĠF M", + "Ġinc omes", + "V EL", + "ok ers", + "Ġpe cul", + "Ġlight weight", + "g ue", + "Ġacc ent", + "Ġincre ment", + "ĠCh an", + "Ġcompl aining", + "ĠB aghd", + "Ġmidfield er", + "Ġover haul", + "Pro cess", + "ĠH ollow", + "ĠTit ans", + "Sm all", + "man uel", + "ĠUn ity", + "ĠEv ents", + "S ty", + "Ġdispro portion", + "n esty", + "en es", + "ĠC od", + "Ġdemonstr ations", + "ĠCrim son", + "ĠO H", + "Ġen rolled", + "Ġc el", + "ĠBre tt", + "Ġa ide", + "Ġhe els", + "Ġbroad band", + "Ġmark ing", + "Ġw izard", + "ĠN J", + "ĠChief s", + "Ġingred ient", + "Ġd ug", + "ĠSh ut", + "urch ase", + "end or", + "Ġfar mer", + "ĠGold man", + "12 9", + "15 5", + "Or der", + "Ġl ion", + "i ably", + "Ġst ain", + "ar ray", + "ilit ary", + "ĠFA Q", + "Ġexpl oded", + "ĠMcC arthy", + "ĠT weet", + "ĠG reens", + "ek ing", + "l n", + "ens en", + "Ġmotor cycle", + "Ġpartic le", + "Ġch olesterol", + "B ron", + "Ġst air", + "Ġox id", + "Ġdes irable", + "ib les", + "Ġthe or", + "for cing", + "Ġpromot ional", + "ov o", + "b oot", + "ĠBon us", + "raw ling", + "Ġshort age", + "ĠP sy", + "Ġrecru ited", + "Ġinf ants", + "Ġtest osterone", + "Ġded uct", + "Ġdistinct ive", + "Ġfirm ware", + "bu ilt", + "14 5", + "Ġexpl ored", + "Ġfact ions", + "Ġv ide", + "Ġtatt oo", + "Ġfinan cially", + "Ġfat igue", + "Ġproceed ing", + "const itutional", + "Ġmis er", + "Ġch airs", + "gg ing", + "ipp le", + "Ġd ent", + "Ġdis reg", + "ç Ķ", + "st ant", + "ll o", + "b ps", + "aken ing", + "Ġab normal", + "ĠE RA", + "å£ «", + "ĠH BO", + "ĠM AR", + "Ġcon cess", + "Ġserv ant", + "Ġas pir", + "l av", + "ĠPan el", + "am o", + "Ġprec ip", + "Ġrecord ings", + "Ġproceed ed", + "Ġcol ony", + "ĠT ang", + "ab lo", + "Ġstri pped", + "Le ft", + "to o", + "Ġpot atoes", + "Ġfin est", + "% ).", + "Ġc rap", + "ĠZ ach", + "ab ases", + "ĠG oth", + "Ġbillion aire", + "w olf", + "Ġsan ction", + "S K", + "Ġlog ged", + "P o", + "ey ed", + "un al", + "Ġcr icket", + "Ġarm ies", + "Ġunc overed", + "Cl oud", + "ó n", + "Ġreb ounds", + "Ġm es", + "O per", + "P ac", + "Ġnation ally", + "Ġinsert ed", + "p ict", + "Ġgovern ance", + "Ð ¸", + "Ġprivile ges", + "G ET", + "Ġfavor ites", + "im ity", + "Ġlo ver", + "the m", + "em pl", + "Ġgorge ous", + "An n", + "Ġsl ipped", + "Ġve to", + "B ob", + "Ġsl im", + "u cc", + "ĠF ame", + "udden ly", + "Ġden ies", + "ĠM aur", + "Ġdist ances", + "Ġw anna", + "t ar", + "ĠS ER", + "Ġâ Ī", + "Ġle mon", + "at hetic", + "Ġlit eral", + "Ġdistingu ished", + "Ġansw ering", + "G I", + "Ġrelig ions", + "ĠPhil os", + "ĠL ay", + "Ġcomp os", + "ire ments", + "ĠK os", + "ine z", + "roll ing", + "Ġyoung est", + "and ise", + "ĠB orn", + "Ġalt ar", + "am ina", + "ĠB oot", + "v oc", + "Ġdig ging", + "Ġpress ures", + "Ġl en", + "26 4", + "Ġassass ination", + "ĠBir mingham", + "ĠMy th", + "Ġsovere ign", + "ĠArt ist", + "ĠPhot ograph", + "Ġdep icted", + "Ġdisp ens", + "orth y", + "Ġamb ul", + "int eg", + "ĠC ele", + "ĠTib et", + "Ġhier archy", + "Ġc u", + "Ġpre season", + "ĠPet erson", + "Ġcol ours", + "Ġworry ing", + "Ġback ers", + "ĠPal mer", + "ĠÎ ¼", + "Ġcontribut or", + "Ġhear ings", + "Ġur ine", + "Ġ Ù", + "ourge ois", + "Sim ilar", + "ĠZ immer", + "s omething", + "ĠUS C", + "Ġstrength s", + "ĠF I", + "Ġlog ging", + "As ked", + "ĠTh ai", + "in qu", + "ĠW alt", + "Ġcrew s", + "it 
ism", + "3 01", + "Ġshar ply", + "um ed", + "Ġred irect", + "r ators", + "In f", + "ĠWe apons", + "Ġte asp", + "19 99", + "L ive", + "ĠEs pecially", + "ĠS ter", + "ĠVeter ans", + "Ġint ro", + "other apy", + "Ġmal ware", + "Ġbre eding", + "Ġmole cular", + "ĠR oute", + "ĠCom ment", + "oc hem", + "Ġa in", + "Se ason", + "Ġlineback er", + "Ä «", + "ĠEconom ics", + "es ar", + "ĠL ives", + "ĠEm ma", + "Ġk in", + "ĠTer rit", + "Ġpl anted", + "ot on", + "ĠBut ter", + "ĠSp ons", + "P ER", + "Ġdun geon", + "Ġsymb olic", + "Ġfil med", + "Ġdi ets", + "Ġconclud es", + "Ġcertain ty", + "ĠForm at", + "Ġstr angers", + "form at", + "ĠPh ase", + "Ġcop ied", + "Ġmet res", + "ld a", + "ĠUs ers", + "Ġdeliber ate", + "Ġwas hed", + "ĠL ance", + "im ation", + "Ġimpro per", + "ĠGen esis", + "ick r", + "ĠK ush", + "Ġreal ise", + "Ġembarrass ing", + "alk ing", + "b ucks", + "Ġver ified", + "Ġout line", + "year s", + "ĠIn come", + "20 2", + "Ġz ombies", + "F inal", + "ĠMill enn", + "Ġmod ifications", + "ĠV ision", + "ĠM oses", + "ver b", + "iter ranean", + "ĠJ et", + "Ġnav al", + "ĠA gg", + "Ġur l", + "Ġvict ories", + "Ġnon etheless", + "Ġinj ust", + "ĠF act", + "ç ļ", + "Ġins ufficient", + "re view", + "face book", + "Ġnegoti ating", + "Ġguarant ees", + "im en", + "uten berg", + "Ġg ambling", + "Ġcon gr", + "Load ing", + "Ġnever theless", + "Ġpres idents", + "ĠIndust rial", + "Ġ11 8", + "Ġp oured", + "ĠT ory", + "Ġ17 5", + "Ġ: =", + "Sc ott", + "ange red", + "T ok", + "Ġorgan izers", + "M at", + "ĠG rowth", + "Ġad ul", + "Ġens ures", + "Ġ11 7", + "é¾į å", + "Ġmass acre", + "Ġgr ades", + "be fore", + "AD VERTISEMENT", + "ĠSl ow", + "ĠM MA", + "âĢĶ \"", + "ĠV atican", + "Q aeda", + "Ġo we", + "66 66", + "ĠS orry", + "ĠGr ass", + "Ġbackground s", + "Ġexha usted", + "Ġcl an", + "Ġcomprom ised", + "ĠE lf", + "ĠIsa ac", + "ens on", + "In vest", + "IF A", + "Ġinterrupt ed", + "ãĥī ãĥ©", + "Ġtw isted", + "ĠDrag ons", + "M ode", + "ĠK remlin", + "Ġfert il", + "he res", + "ph an", + "ĠN ode", + "f ed", + "ĠOr c", + "Ġunw illing", + "C ent", + "Ġprior it", + "Ġgrad uates", + "Ġsubject ive", + "Ġiss uing", + "ĠL t", + "Ġview er", + "Ġw oke", + "Th us", + "bro ok", + "Ġdep ressed", + "Ġbr acket", + "ĠG or", + "ĠFight ing", + "Ġstri ker", + "Rep ort", + "ĠPortug al", + "Ġne o", + "w ed", + "19 9", + "Ġflee ing", + "sh adow", + "ident ified", + "US E", + "Ste am", + "Ġstret ched", + "Ġrevel ations", + "art ed", + "ĠD w", + "Ġalign ment", + "est on", + "ĠJ ared", + "S ep", + "Ġblog s", + "up date", + "g om", + "r isk", + "Ġcl ash", + "ĠH our", + "Ġrun time", + "Ġunw anted", + "Ġsc am", + "Ġr ack", + "Ġen light", + "on est", + "ĠF err", + "Ġconv ictions", + "Ġp iano", + "Ġcirc ulation", + "ĠW elcome", + "Ġback lash", + "ĠW ade", + "Ġrece ivers", + "ot ive", + "J eff", + "Ġnetwork ing", + "ĠPre p", + "ĠExpl orer", + "Ġlect ure", + "Ġupload ed", + "ĠMe at", + "B LE", + "ĠNaz is", + "ĠSy nd", + "st ud", + "ro ots", + "ri ans", + "Ġportray ed", + "Ġ ??", + "ĠBudd ha", + "s un", + "Rober t", + "ĠCom plex", + "Ġover see", + "Ġste alth", + "T itle", + "ĠJ obs", + "ĠK um", + "Ġappreci ation", + "ĠM OD", + "Ġbas ics", + "Ġcl ips", + "Ġnurs ing", + "Ġpropos ition", + "Ġreal ised", + "ĠNY C", + "Ġall ocated", + "ri um", + "ar an", + "ĠPro duction", + "ĠV ote", + "Ġsm ugg", + "Ġhun ter", + "az er", + "ĠCh anges", + "Ġfl uct", + "y on", + "Ar ray", + "Ġk its", + "W ater", + "Ġuncom mon", + "Ġrest ing", + "ell s", + "w ould", + "Ġpurs ued", + "Ġassert ion", + "omet own", + "ĠMos ul", + "ĠPl atform", + "io let", + "Ġshare holders", + "Ġtra ils", + 
"P ay", + "ĠEn forcement", + "ty pes", + "ĠAn onymous", + "Ġsatisf ying", + "il ogy", + "Ġ( '", + "w ave", + "c ity", + "Ste ve", + "Ġconfront ation", + "ĠE ld", + "C apt", + "ah an", + "ht m", + "ĠC trl", + "ON S", + "2 30", + "if a", + "hold ing", + "Ġdelic ate", + "Ġj aw", + "ĠGo ing", + "or um", + "S al", + "Ġd ull", + "ĠB eth", + "Ġpr isons", + "Ġe go", + "ĠEl sa", + "avor ite", + "ĠG ang", + "ĠN uclear", + "Ġsp ider", + "ats u", + "Ġsam pling", + "Ġabsor bed", + "ĠPh arm", + "iet h", + "Ġbuck et", + "ĠRec omm", + "O F", + "ĠF actory", + "AN CE", + "Ġb acter", + "H as", + "ĠObs erv", + "12 1", + "Ġprem iere", + "De velop", + "Ġcur rencies", + "C ast", + "Ġaccompany ing", + "ĠNash ville", + "Ġfat ty", + "ĠBre nd", + "Ġloc ks", + "Ġcent ered", + "ĠU T", + "augh s", + "or ie", + "ĠAff ordable", + "v ance", + "D L", + "em et", + "Ġthr one", + "ĠBlu etooth", + "Ġn aming", + "if ts", + "AD E", + "Ġcorrect ed", + "Ġprompt ly", + "ĠST R", + "Ġgen ome", + "Ġcop e", + "Ġval ley", + "Ġround ed", + "ĠK end", + "al ion", + "p ers", + "Ġtour ism", + "Ġst ark", + "v l", + "Ġblow ing", + "ĠSche dule", + "st d", + "Ġunh appy", + "Ġlit igation", + "ced es", + "Ġand roid", + "Ġinteg ral", + "ere rs", + "ud ed", + "t ax", + "Ġre iter", + "ĠMot ors", + "oci ated", + "Ġwond ers", + "ĠAp ost", + "uck ing", + "ĠRoose velt", + "f ram", + "Ġyield s", + "Ġconstit utes", + "aw k", + "Int erest", + "Ġinter im", + "Ġbreak through", + "ĠC her", + "Ġpro sec", + "ĠD j", + "ĠM T", + "Res p", + "ĠP T", + "Ġs perm", + "ed it", + "B T", + "Lin ux", + "count ry", + "le ague", + "Ġd ick", + "Ġo ct", + "Ġinsert ing", + "Ġsc ra", + "ĠBrew ing", + "Ġ19 66", + "Ġrun ners", + "Ġpl un", + "id y", + "ĠD ian", + "Ġdys function", + "Ġex clusion", + "Ġdis gr", + "Ġincorpor ate", + "Ġrecon c", + "Ġnom inated", + "ĠAr cher", + "d raw", + "achel or", + "Ġwrit ings", + "Ġshall ow", + "Ġh ast", + "ĠB MW", + "ĠR S", + "Ġth igh", + "Ġ19 63", + "Ġl amb", + "Ġfav ored", + "ag le", + "Ġcool er", + "ĠH ours", + "ĠG U", + "ĠOrig in", + "Ġglim pse", + "---------------- ----", + "L im", + "Ġche ek", + "Ġj ealous", + "- '", + "Ġhar ness", + "ĠPo ison", + "Ġdis abilities", + "ne apolis", + "Ġout look", + "Ġnot ify", + "ĠIndian apolis", + "Ġab rupt", + "ns ic", + "Ġenc rypted", + "Ġfor fe", + "reat h", + "Ġr abb", + "Ġfound ations", + "Ġcompl iment", + "ĠInter view", + "ĠS we", + "Ġad olesc", + "Ġmon itors", + "ĠSacrament o", + "Ġtime ly", + "Ġcontem pl", + "Ġposition ed", + "Ġpost ers", + "ph ies", + "iov ascular", + "v oid", + "ĠFif th", + "Ġinvestig ative", + "OU N", + "Ġinteg rate", + "ĠIN C", + "ish a", + "ibl ings", + "ĠRe quest", + "ĠRodrig uez", + "Ġsl ides", + "ĠD X", + "Ġfemin ism", + "Ġdat as", + "Ġb end", + "ir us", + "ĠNig eria", + "F ox", + "Ch ange", + "Ġair plane", + "ĠLad en", + "Ġpublic ity", + "ixt y", + "Ġcommit ments", + "Ġaggreg ate", + "Ġdisplay ing", + "ĠAr row", + "Ġ12 2", + "Ġrespect s", + "and roid", + "s ix", + "ĠSh a", + "Ġrest oration", + ") \\", + "W S", + "oy s", + "Ġillust rate", + "with out", + "12 6", + "ĠâĶ Ĥ", + "Ġpick up", + "n els", + "Ġ ....", + "f ood", + "ĠF en", + ") ?", + "Ġphenomen a", + "Ġcompan ions", + "ĠW rite", + "Ġsp ill", + "Ġbr idges", + "ĠUp dated", + "ĠF o", + "Ġinsect s", + "ASH INGTON", + "Ġsc are", + "il tr", + "ĠZh ang", + "Ġsever ity", + "Ġind ul", + "14 9", + "ĠCo ffee", + "Ġnorm s", + "Ġp ulse", + "ĠF T", + "Ġhorr ific", + "ĠDest roy", + "ĠJ SON", + "Ġo live", + "Ġdiscuss es", + "R est", + "E lect", + "ĠW inn", + "ĠSurv iv", + "ĠH ait", + "S ure", + "op ed", + "Ġro oted", + "ĠS ke", + 
"ĠBron ze", + "Ġl ol", + "Def ault", + "Ġcommod ity", + "red ited", + "Ġliber tarian", + "Ġforb idden", + "Ġgr an", + "à ¨", + "Ġl ag", + "en z", + "dri ve", + "Ġmathemat ics", + "Ġw ires", + "Ġcrit ically", + "Ġcarb ohyd", + "ĠChance llor", + "ĠEd die", + "Ġban ning", + "ĠF ri", + "Ġcompl ications", + "et ric", + "ĠBangl adesh", + "Ġband width", + "St op", + "ĠOrig inally", + "Ġhalf way", + "yn asty", + "sh ine", + "Ġt ales", + "rit ies", + "av ier", + "Ġspin ning", + "ĠWH O", + "Ġneighbour hood", + "b ach", + "Ġcommer ce", + "ĠS le", + "B U", + "Ġentreprene ur", + "Ġpecul iar", + "ĠCom ments", + "f re", + "3 20", + "IC S", + "Ġimag ery", + "ĠCan on", + "ĠElect ronic", + "sh ort", + "( (", + "D ig", + "Ġcomm em", + "u ced", + "Ġincl ined", + "ĠSum mon", + "Ġcl iff", + "ĠMed iterranean", + "Ġpo etry", + "Ġprosper ity", + "ĠRe ce", + "Ġp ills", + "m ember", + "Ġfin ale", + "un c", + "ĠG ig", + "ä ½", + "Ġl od", + "Ġback ward", + "- +", + "ĠFor ward", + "Ġth ri", + "s ure", + "Ġso ap", + "ĠF X", + "R ES", + "ĠSe xual", + "oul os", + "Ġfool ish", + "Ġright eous", + "Ġco ff", + "terror ism", + "ust ain", + "ot er", + "Ġab uses", + "ne xt", + "Ġab usive", + "Ġthere after", + "Ġprohib ition", + "ĠS UP", + "Ġd ip", + "Ġr ipped", + "Ġinher ited", + "Ġb ats", + "st ru", + "G T", + "Ġflaw ed", + "ph abet", + "Ġf og", + "do ors", + "Ġim aging", + "Ġdig its", + "ĠHung ary", + "Ġar rog", + "Ġteach ings", + "Ġprotocol s", + "ĠB anks", + "à ¸", + "p ound", + "ĠC urt", + ".\" )", + ". /", + "Ġex emption", + "end ix", + "ĠM ull", + "Ġimpro ves", + "ĠG amer", + "d imensional", + "I con", + "ĠMarg aret", + "St atus", + "d ates", + "Ġint ends", + "Ġdep ict", + "Ġpark ed", + "J oe", + "ĠMar ines", + "chn ology", + "! ).", + "Ġjud ged", + "Ġwe ights", + "R ay", + "Ġapart ments", + "he ster", + "Ġrein force", + "Ġoff ender", + "occ up", + "Ġs ore", + "e pt", + "ĠPH P", + "ĠB row", + "Ġauthor ization", + "ĠR isk", + "ĠDel aware", + "ĠQ U", + "Ġnot ifications", + "Ġsun light", + "Ġex clude", + "d at", + "Ġm esh", + "ĠSud an", + "Ġbelong ed", + "Ġsub way", + "Ġno on", + "ĠInter ior", + "ol ics", + "ĠL akers", + "Ġc oding", + "Dis claimer", + "Cal if", + "O ld", + "Ġdis l", + "???? 
?", + "Ġconfir ms", + "Ġrecruit ment", + "Ġhom icide", + "Cons ider", + "ĠJeff rey", + "ft y", + "} ;", + "Ġobject ion", + "do ing", + "ĠLe o", + "W ant", + "Ġgl ow", + "ĠClar ke", + "ĠNorm an", + "Ġver ification", + "Ġpack et", + "ĠForm ula", + "Ġpl ag", + "es ville", + "Ġshout ing", + "Ġo v", + "ĠR EC", + "ĠB ub", + "Ġn inth", + "Ġener g", + "Ġvalid ity", + "Ġup s", + "j ack", + "Ġneighbor ing", + "ĠN ec", + "ew orks", + "ĠH ab", + "are z", + "Ġsp ine", + "Ġevent ual", + "ĠLe aders", + "ĠC arn", + "Ġprob ation", + "Ġrom ance", + "ms g", + "ĠMechan ical", + "ER Y", + "R ock", + "Ġpart isan", + "N ode", + "ass ets", + "min ent", + "Ġforeign ers", + "Ġtest ify", + "ĠUs ually", + "l ords", + "ĠG ren", + "ĠPow ell", + "BI L", + "Ġs r", + "Ġadd ict", + "Ġshell s", + "Ġs igh", + "ĠY ale", + "tern ity", + "Ġ7 50", + "E U", + "ĠR ifle", + "Ġpat ron", + "em a", + "ĠB annon", + "an ity", + "Ġtrop ical", + "ĠV II", + "c ross", + "Every thing", + "ĠIS O", + "Ġhum ble", + "ass ing", + "ĠF IG", + "Ġupd ating", + "ys on", + "Ġcal cium", + "Ġcompet ent", + "Ġste ering", + "Pro t", + "ĠS Y", + "ĠFin als", + "ĠR ug", + "15 9", + "13 7", + "ĠG olf", + "Ġ12 6", + "Ġaccommod ation", + "ĠHug hes", + "Ġaest hetic", + "art isan", + "ĠTw ilight", + "Ġpr ince", + "ĠAgric ulture", + "ĠDis co", + "Ġpreced ent", + "Ġtyp ing", + "author ized", + "O ption", + "ĠA ub", + "l ishes", + "ach t", + "m ag", + "P eter", + "ĠU FO", + "mont on", + "ĠL ith", + "Ġa rom", + "Ġsec uring", + "Ġconf ined", + "priv ate", + "Ġsw ords", + "Ġmark ers", + "Ġmetab olic", + "se lect", + "ĠCur se", + "ĠO t", + "g ressive", + "Ġinc umb", + "ĠS aga", + "Ġpr iced", + "Ġclear ance", + "Cont ent", + "Ġdr illing", + "Ġnot ices", + "Ġb ourgeois", + "Ġv est", + "Ġcook ie", + "ĠGuard ians", + "ry s", + "in yl", + "Ġ12 4", + "Ġpl ausible", + "on gh", + "ĠOd in", + "Ġconcept ion", + "ĠY uk", + "ĠBaghd ad", + "ĠFl ag", + "Aust ral", + "ĠI BM", + "Ġintern ationally", + "ĠWiki Leaks", + "I ED", + "Ġc yn", + "Ġcho oses", + "ĠP ill", + "Ġcomb ining", + "Ġrad i", + "ĠMoh ammed", + "def ense", + "atch ing", + "Sub ject", + "ic iency", + "Fr ame", + "Ġ{ \"", + "Ġche ss", + "Ġtim er", + "19 0", + "Ġt in", + "Ġord inance", + "emet ery", + "Ġacc using", + "Ġnotice able", + "Ġcent res", + "Ġl id", + "ĠM ills", + "img ur", + "Ġz oom", + "erg ic", + "Ġcomp ression", + "pr im", + "f ind", + "Ġsur g", + "Ġp and", + "ĠK ee", + "ĠCh ad", + "cell ence", + "oy le", + "Ġsocial ism", + "ĠT ravis", + "ĠM Hz", + "Ġgu ild", + "ALL Y", + "ĠSub scribe", + "ĠRel ated", + "Ġoccur rence", + "itch ing", + "Ġfict ional", + "Ġcr ush", + "ĠE A", + "c od", + "m ix", + "ĠTri ple", + "Ġretrie ve", + "Ġstimul us", + "Ġpsych iat", + "ĠDo or", + "Ġhomosexual ity", + "Ġelement ary", + "Ġcell ular", + "id ian", + "ĠL aun", + "Ġintrig uing", + "Ġfo am", + "ĠB ass", + "id i", + "its u", + "Ġass ure", + "Ġcongr at", + "Ġbusiness man", + "ĠBo ost", + "cl ose", + "Ġl ied", + "Ġsc iences", + "ĠO mega", + "ĠG raphics", + "Ġ< =", + "sp oken", + "Ġconnect ivity", + "S aturday", + "ĠAven gers", + "Ġto ggle", + "Ġank le", + "Ġnational ist", + "mod el", + "ĠP ool", + "ophob ia", + "V ar", + "ĠM ons", + "ator ies", + "Ġaggress ively", + "C lear", + "For ge", + "act ers", + "Ġhed ge", + "Ġpip es", + "Ġbl unt", + "Ġs q", + "Ġremote ly", + "W ed", + "as ers", + "Ġref riger", + "Ġt iles", + "Ġresc ued", + "Ġcompr ised", + "ins ky", + "Ġman if", + "avan augh", + "Ġprol ifer", + "Ġal igned", + "x ml", + "Ġtri v", + "Ġcoord ination", + "ĠP ER", + "ĠQu ote", + "13 4", + "b f", + "ĠS aw", + "Ġtermin ation", + "Ġ19 
0", + "Ġadd itions", + "Ġtri o", + "Ġproject ions", + "Ġpositive ly", + "Ġin clusive", + "Ġmem br", + "19 90", + "old er", + "Ġpract iced", + "ink le", + "Ar ch", + "Ġstar ters", + "ari us", + "Ġinter mediate", + "ĠBen ef", + "ĠK iller", + "Ġinter ventions", + "ĠK il", + "ĠF lying", + "In v", + "Ġprem ature", + "Ġpsych iatric", + "Ġind ie", + "Ġcoll ar", + "ĠRain bow", + "af i", + "Ġdis ruption", + "ĠFO X", + "cast ing", + "Ġmis dem", + "c ro", + "Ġw ipe", + "ard on", + "Ġb ast", + "ĠTom my", + "ĠRepresent ative", + "Ġbell y", + "ĠP O", + "ĠBre itbart", + "13 2", + "Ġmess aging", + "Sh ould", + "Ref erences", + "ĠG RE", + "ist ical", + "L P", + "ĠC av", + "ĠC razy", + "Ġintu itive", + "ke eping", + "ĠM oss", + "Ġdiscont in", + "ĠMod ule", + "Ġun related", + "ĠPract ice", + "ĠTrans port", + "Ġstatist ically", + "orn s", + "Ġs ized", + "p u", + "Ġca f", + "ĠWorld s", + "ĠRod gers", + "ĠL un", + "ĠCom ic", + "l iving", + "Ġc ared", + "Ġclim bed", + ") {", + "Ġconsist ed", + "Ġmed ieval", + "fol k", + "Ġh acked", + "Ġd ire", + "ĠHerm ione", + "Ġt ended", + "ce ans", + "D aniel", + "w ent", + "Ġlegisl ators", + "Ġred es", + "g ames", + "Ġg n", + "am iliar", + "Ġ+ +", + "gg y", + "th reat", + "Ġmag net", + "Ġper ceive", + "Ġz ip", + "Ġindict ment", + "Ġcrit ique", + "g ard", + "ĠSaf e", + "ĠC ream", + "Ġad vent", + "ob a", + "Ġv owed", + "ous ands", + "Ġsk i", + "Ġabort ions", + "u art", + "Ġstun ned", + "Ġadv ancing", + "Ġlack ed", + "Ġ\\ \"", + "Ġsch izophren", + "Ġeleg ant", + "Ġconf erences", + "Ġcance led", + "ĠHud son", + "ĠHop efully", + "Ġtr ump", + "Ġfrequ encies", + "Ġmet eor", + "ĠJun ior", + "ĠFle et", + "ĠMal colm", + "ĠT ools", + "Ġ ........", + "Ġh obby", + "ĠEurope ans", + "Ġ15 00", + "ĠInt o", + "Ġs way", + "ĠApp ro", + "ĠCom pl", + "Comm unity", + "Ġt ide", + "ĠSum mit", + "ä »", + "Ġinter vals", + "ĠE ther", + "Ġhabit at", + "ĠSteven s", + "lish ing", + "ĠDom ain", + "Ġtrig gers", + "Ġch asing", + "Ġchar m", + "ĠFl ower", + "it ored", + "Ġbless ing", + "Ġtext ures", + "F ive", + "Ġliqu or", + "R P", + "F IN", + "Ġ19 62", + "C AR", + "Un known", + "Ġres il", + "ĠL ily", + "Ġabund ance", + "Ġpredict able", + "r ar", + "Ġbull shit", + "le en", + "che t", + "M or", + "M uch", + "ä ¹", + "Ġemphas ized", + "Ġcr ust", + "Ġprim itive", + "Ġenjoy able", + "ĠPict ures", + "Ġteam mate", + "pl er", + "ĠT ol", + "ĠK ane", + "Ġsummon ed", + "th y", + "ram a", + "ĠH onda", + "Ġreal izing", + "Ġquick er", + "Ġconcent rate", + "cle ar", + "Ġ2 10", + "ĠErd ogan", + "ar is", + "Ġrespond s", + "ĠB I", + "Ġelig ibility", + "Ġpus hes", + "ĠId aho", + "Ġagg rav", + "Ġru ins", + "ur ations", + "Ġb ans", + "Ġan at", + "sh are", + "Ġgr ind", + "h in", + "um en", + "Ġut ilities", + "ĠYan kees", + "Ġdat abases", + "ĠD D", + "Ġdispl aced", + "Ġdepend encies", + "Ġstim ulation", + "h un", + "h ouses", + "ĠP retty", + "ĠRaven s", + "ĠTOD AY", + "Ġassoci ates", + "Ġthe rape", + "cl ed", + "Ġde er", + "Ġrep airs", + "rent ice", + "Ġrecept ors", + "Ġrem ed", + "ĠC e", + "Ġmar riages", + "Ġball ots", + "ĠSold ier", + "Ġhilar ious", + "op l", + "13 8", + "Ġinherent ly", + "Ġignor ant", + "Ġb ounce", + "ĠE aster", + "REL ATED", + "ĠCur rency", + "E V", + "ãĥ ŀ", + "ĠLe ad", + "Ġdece ased", + "B rien", + "ĠMus k", + "J S", + "Ġmer ge", + "heart ed", + "c reat", + "m itt", + "m und", + "ĠâĢ ĭ", + "ĠB ag", + "Ġproject ion", + "Ġj ava", + "ĠStand ards", + "ĠLeon ard", + "Ġcoc onut", + "ĠPop ulation", + "Ġtra ject", + "Ġimp ly", + "Ġcur iosity", + "ĠD B", + "ĠF resh", + "ĠP or", + "Ġheav ier", + "ne ys", + "gom ery", 
+ "Ġdes erved", + "Ġphr ases", + "ĠG C", + "Ġye ast", + "d esc", + "De ath", + "Ġreb oot", + "Ġmet adata", + "IC AL", + "Ġrep ay", + "ĠInd ependence", + "Ġsubur ban", + "ical s", + "Ġat op", + "Ġall ocation", + "gener ation", + "ĠG ram", + "Ġmoist ure", + "Ġp ine", + "ĠLiber als", + "Ġa ides", + "Ġund erest", + "ĠBer ry", + "Ġcere mon", + "3 70", + "ast rous", + "ĠPir ates", + "Ġt ense", + "ĠIndust ries", + "ĠApp eals", + "ĠN ear", + "Ġè£ı ç", + "Ġlo vers", + "ĠC AP", + "ĠC raw", + "Ġg iants", + "Ġeffic acy", + "E lement", + "ĠBeh avior", + "ĠToy ota", + "Ġint est", + "P riv", + "A I", + "Ġmaneu ver", + "Ġperfect ion", + "Ġb ang", + "p aper", + "r ill", + "Ge orge", + "b order", + "in ters", + "ĠS eth", + "Ġcl ues", + "ĠLe vi", + "ĠRe venue", + "14 7", + "Ġv apor", + "Ġfortun ate", + "Ġthreat ens", + "Ġve t", + "Ġdepend ency", + "ers ed", + "art icle", + "ĠBl izzard", + "Ġch lor", + "Ġmin us", + "ĠB ills", + "Ġcryptoc urrency", + "Ġmetabol ism", + "ter ing", + "Ġp estic", + "step s", + "ĠTre asure", + "ract ed", + "ĠConst ant", + "Ġtem p", + "13 9", + "ĠDet ective", + "ur ally", + "Ġrecover ing", + "Ġcort ex", + "Ġ14 4", + "cl osed", + "Ġprejud ice", + "aun ted", + "Ġstorm s", + "ĠN OW", + "Ġmach inery", + "Add ress", + "Ġcompe lled", + "27 0", + "Ġdesp air", + "b ane", + "Ġveget able", + "Ġbed s", + "Lear n", + "Ġcolor ful", + "Ġsp ike", + "Ġmarg ins", + "Ġsymp athy", + "Ġworks hop", + "ĠC BC", + "S at", + "Ġburn s", + "ĠG ender", + "Ġ12 9", + "ĠC able", + "Ġdeb ts", + "ĠThe resa", + "Ġreflect ing", + "Ġa irst", + "Ġr im", + "ram id", + "Ġweakness es", + "W rit", + "ogg le", + "t i", + "ĠCh arge", + "Ġwe ighed", + "Ġ( .", + "Ġl aughter", + "Ġrou ter", + "ĠDemocr acy", + "D ear", + "Ġhas ht", + "Ġd y", + "Ġhint s", + "run ning", + "Ġfin ishes", + "ar us", + "M ass", + "res ult", + "asc us", + "Ġv intage", + "Ġcon qu", + "Ġwild ly", + "ac ist", + "Ġl ingu", + "Ġprot agonist", + "st rom", + "te enth", + "ĠSol o", + "m ac", + "f illed", + "Ġre nown", + "it ives", + "Ġmot ive", + "ĠAnt ar", + "ĠM ann", + "ĠAd just", + "Ġrock ets", + "Ġtrou bling", + "e i", + "Ġorgan isms", + "ass is", + "Christ ian", + "Ġ14 5", + "ĠH ass", + "Ġsw all", + "Ġw ax", + "ĠSurv ival", + "V S", + "ĠM urd", + "v d", + "stand ard", + "Ġdrag ons", + "Ġacceler ation", + "r ational", + "f inal", + "Ġp aired", + "ĠE thereum", + "Ġinterf aces", + "Ġres ent", + "Ġartif acts", + "Å «", + "are l", + "Ġcompet itor", + "ĠNich olas", + "ĠSur face", + "c pp", + "ĠT ot", + "Ġeconom ically", + "Ġorgan ised", + "Ġen forced", + "in ho", + "Ġvar ieties", + "Ġab dom", + "ĠBa iley", + "id av", + "ĠSal v", + "p aid", + "Ġalt itude", + "ess ert", + "ĠG utenberg", + "are a", + "op oulos", + "Ġprofess ors", + "igg s", + "ĠF ate", + "he y", + "Ġ3 000", + "D ist", + "Ġtw ins", + "c ill", + "ĠM aps", + "Ġtra ps", + "Ġwe ed", + "ĠK iss", + "Ġy oga", + "Ġrecip ients", + "ĠWest minster", + "Ġpool s", + "ĠWal mart", + "18 8", + "ĠSchool s", + "att ack", + "ĠAR M", + "par agraph", + "W arning", + "j l", + "Ġself ish", + "anche z", + "ĠHe ights", + "F re", + "ĠS oph", + "Ġ --------------------------------", + "t ml", + "33 3", + "Ġraid s", + "Ġsatell ites", + "KE Y", + "Ġlast s", + "Ñ Ĥ", + "In s", + "ĠD ame", + "Ġunp redict", + "// /", + "gh ai", + "Ġart illery", + "Ġcru ise", + "Ġg el", + "ĠCabin et", + "Ġbl ows", + "ĠE sp", + "Ġprox imity", + "ot he", + "ĠSk ills", + "ĠU pper", + "ob o", + "ĠN DP", + "Ġenjoy s", + "Ġrepe ating", + "ĠConst ruction", + "ĠQuest ions", + "H illary", + "Ġu int", + "Ġprocess ors", + "ĠGib son", + "ĠMult iple", + "q a", 
+ "ĠB om", + "ĠM iles", + "vent ional", + "Ġhur ts", + "s kin", + "ĠA IDS", + "Ġadvis ers", + "ĠR oot", + "Ġmethod ology", + "ĠD ale", + "Ġdet on", + "ĠKnow ledge", + "sequ ently", + "Ġ12 1", + "Ġconnect s", + "C y", + "ĠD anger", + "Ġcontribut ors", + "ĠB ent", + "Ġbr ass", + "ĠGun s", + "int o", + "ĠFort une", + "Ġbro ker", + "bal ance", + "Ġlength s", + "Ġv ic", + "Ġaver aging", + "Ġappropri ately", + "ĠCamer a", + "Ġsand wich", + "ĠCD C", + "Ġcoord inate", + "Ġnav ig", + "Ġgood ness", + "l aim", + "Ġbra ke", + "Ġextrem ist", + "ĠW ake", + "ĠM end", + "ĠT iny", + "ĠC OL", + "ĠR F", + "ĠD ual", + "ĠW ine", + "C ase", + "Ġref ined", + "Ġl amp", + "L ead", + "Ġb apt", + "ĠCar b", + "ĠS add", + "ĠMin neapolis", + "PD F", + "Ear ly", + "ĠH idden", + "I ts", + "ĠT IME", + "Ġp ap", + "Ġcommission ed", + "ĠF ew", + "ĠCol ts", + "ĠB ren", + "Ġbot hered", + "Ġlike wise", + "Ex per", + "ĠSch w", + "c ry", + "n n", + "ĠM itch", + "im on", + "M G", + "b m", + "UM P", + "r ays", + "Ġregist ry", + "Ġ2 70", + "ach ine", + "re lla", + "ant ing", + "00 000", + "Ġru ined", + "sp ot", + "Ġt a", + "Ġmaxim ize", + "Ġincon ven", + "D ead", + "H uman", + "En abled", + "ĠMar ie", + "Ġch ill", + "ĠParad ise", + "Ġstar ring", + "ĠLat ino", + "ĠProt ocol", + "ĠE VER", + "Ġsuppl iers", + "m essage", + "ĠBro ck", + "Ġser um", + "âĸĪâĸĪ âĸĪâĸĪ", + "Ġen comp", + "Ġamb ition", + "ues e", + "Ġar rows", + "And rew", + "Ġanten na", + "Ġ19 61", + "ĠB ark", + "Ġb ool", + "ãĤ ª", + "ĠSt orage", + "Ġrail way", + "Ġtoug her", + "ĠC ad", + "Ġwas hing", + "P y", + "' ]", + "em bed", + "ĠMem phis", + "ack le", + "Ġfam ously", + "ĠF ortunately", + "ov ies", + "Ġmind set", + "Ġsne ak", + "ĠD h", + "RA W", + "ĠSim pson", + "Ġliv est", + "Ġland mark", + "Ġc ement", + "L ow", + "Ġthr illed", + "ĠCour se", + "in el", + "Ġch uck", + "id ate", + "gl obal", + "Ġwh it", + "Ġ �", + "ad ays", + "s ki", + "ĠS V", + "Ġvir uses", + "30 6", + "ĠResp ons", + "Ġthe aters", + "ĠBr anch", + "ĠGene va", + "ĠM K", + "Ġunbel iev", + "Ġcommun ist", + "Orig inal", + "ĠRe ceived", + "ĠTrans fer", + "ĠAr g", + "In put", + "ĠStr ategy", + "Ġpal ace", + "the ning", + "D ri", + "Ġsent encing", + "umbn ail", + "Ġp ins", + "re cy", + "Ġs iblings", + "Get ting", + "ĠB U", + "ĠNorth west", + "Ġprolong ed", + "ĠSak ura", + "C omb", + "ĠB our", + "Ġinadequ ate", + "ĠK ash", + "Ġus ername", + "ĠImpro ve", + "Ġbatt ling", + "ĠM AC", + "Ġcurric ulum", + "Ġs oda", + "ĠC annon", + "Ġsens ible", + "sp ons", + "De cember", + "Ġw icked", + "ĠP engu", + "Ġdict ators", + "ĠHe arts", + "og yn", + "Ġsimilar ities", + "ĠSt ats", + "Ġh ollow", + "it ations", + "\": [", + "Ġh over", + "ĠList en", + "s ch", + "S und", + "Ġc ad", + "ĠPar ks", + "Ġl ur", + "Ġhy pe", + "ĠL em", + "N AME", + "is ure", + "Fr iday", + "Ġshoot s", + "Ġclos es", + "Ġd b", + "ĠR idge", + "ĠDiff erent", + "Ġrepl ies", + "ĠBroad way", + "op ers", + "Ġint oler", + "ĠZe us", + "akes pe", + "Ġpropri etary", + "Ġrequest ing", + "Ġcontro llers", + "ĠM IN", + "im edia", + "be cca", + "Ġexp ans", + "Ġoil s", + "B ot", + "ĠCh and", + "Ġpr inter", + "Ġto pped", + "ĠP OL", + "ĠEar lier", + "S ocial", + "av in", + "Ġdecre ases", + "ĠSe b", + "Ġspecific ations", + "ĠBl ast", + "ĠK urt", + "Ġfre el", + "B rown", + "Ġdil ig", + "ro e", + "ĠPro blem", + "ĠQu ad", + "Ġdecent ral", + "ĠV ector", + "an ut", + "Ġplug ins", + "ĠGreg ory", + "Ġfuck ed", + "el ines", + "ĠAmb assador", + "t ake", + "Ġcle ans", + "ong yang", + "An onymous", + "st ro", + "\" }", + "al ine", + "ĠO dd", + "ĠE ug", + "2 16", + "Ġbo il", + "ĠP owers", + 
"Ġnurs es", + "Ob viously", + "ĠTechn ical", + "Ġexceed ed", + "OR S", + "Ġextrem ists", + "Ġtr aces", + "ex pl", + "Ġcom r", + "ĠS ach", + ") /", + "Ġm asks", + "Ġsc i", + "B on", + "Ġreg ression", + "we gian", + "Ġadvis or", + "it ures", + "ĠV o", + "ex ample", + "ĠInst ruct", + "Ġs iege", + "Ġredu ctions", + "pt r", + "Ġstat utory", + "Ġrem oves", + "Ġp uck", + "red its", + "Ġbe e", + "Ġsal ad", + "Ġpromot ions", + "ĠJosh ua", + "with standing", + "ET H", + "ĠCh a", + "im us", + "Ġexpend iture", + "aun ting", + "Ġdelight ed", + "Ġ15 5", + "be h", + "Ġcar pet", + "ĠSp art", + "Ġj ungle", + "l ists", + "Ġbull ying", + "ĠNob el", + "ĠGl en", + "Ġreferen ced", + "Ġintrodu ces", + "se in", + "Ġcho pped", + "gl ass", + "ĠW rest", + "Ġneutral ity", + "Ġâ Ļ", + "Ġinvestig ator", + "Ġshel ves", + "Ġun constitutional", + "Ġreprodu ction", + "Ġmer chant", + "m ia", + "Ġmet rics", + "Ġexplos ives", + "ĠSon ia", + "Ġbod ily", + "Ġthick ness", + "Ġpredomin antly", + "ĠAb ility", + "Ġmon itored", + "IC H", + "Ġ] .", + "ĠMart inez", + "Ġvis ibility", + "Ġqu eries", + "Ġgen ocide", + "ĠWar fare", + "Qu ery", + "Ġstud ios", + "Ġemb ry", + "Ġcorrid or", + "Ġclean ed", + "com plete", + "ĠM H", + "Ġenroll ment", + "ING S", + "Ġimpact ed", + "Ġdis astrous", + "ĠY un", + "ĠCl aire", + "ĠBas ically", + "y t", + "uster ity", + "Ġindirect ly", + "w ik", + "Ġd od", + "ĠCar r", + "Ġam p", + "Ġprohib it", + "ĠIn itial", + "ĠR d", + "ij i", + "Ġeduc ate", + "c orn", + "i ott", + "ĠBeaut y", + "Ġdetect ive", + "ĠCon n", + "s ince", + "Ġst agger", + "Ġob ese", + "Ġb ree", + "olog ic", + "is se", + "walk er", + "Ġbl ades", + "Ġlaw ful", + "fun c", + "ĠBeh ind", + "Ġappet ite", + "Ġ( *", + "Ġt ennis", + "Ġoff spring", + "Ġj ets", + "Ġstruct ured", + "Ġafore mentioned", + "N ov", + "Ġsc aling", + "f ill", + "Ġst ew", + "Ġcur b", + "ĠStep han", + "ed In", + "S F", + "ob ic", + "é ŃĶ", + "ou g", + "ĠM M", + "Ġgen etically", + "ope z", + "13 6", + "Ġu mb", + "anc ers", + "Ġcoh ort", + "Ġmerch andise", + "Ġimp osing", + "ĠLegisl ature", + "ĠArch ive", + "iv ia", + "ĠN aval", + "Ġoff ences", + "Ġmir acle", + "Ġsn apped", + "Ġf oes", + "Ġextensive ly", + "ĠR af", + "Ġc ater", + "ed ience", + "K it", + "ĠB in", + "Ġrecomm ends", + "ĠC ities", + "Ġrig id", + "ĠRE AD", + "ĠNob le", + "ĠT ian", + "Ġcertific ates", + "ant is", + "o iler", + "ĠBudd hist", + "d id", + "Ġsurvey ed", + "Ġdown ward", + "Ġprint s", + "ĠMot ion", + "ron ics", + "ĠS ans", + "oss ibly", + "u ctions", + "Ġcolon ies", + "ĠDan ish", + "un it", + "Ġsp oil", + "Ġadvis ory", + "ber ries", + "Pl an", + "Ġspecific ation", + "op hers", + "ĠRes ource", + "Ġsh irts", + "prising ly", + "commun ications", + "Ġtriv ial", + "Ġmention ing", + "ise xual", + "Ġsupp lements", + "Ġsuper vision", + "B P", + "v or", + "Ġw it", + "Ġco oldown", + "Ġplaint iff", + "ĠReview s", + "ĠS ri", + "ĠM int", + "ĠSug ar", + "Ġafter ward", + "ĠPri est", + "ĠInvest ment", + "og ene", + "ĠT aking", + "Ġstretch ing", + "Ġinflamm ation", + "ĠTe hran", + "Ġl ining", + "Ġfree zing", + "ĠEnt ity", + "Ġins piring", + "spe cial", + "pr ice", + "Ġsu e", + "ĠP orter", + "oun ge", + "ET A", + "ĠD erek", + "ĠLu is", + "u o", + "ym ph", + "Ġex terior", + "ih il", + "ĠAsh ley", + "in ator", + "Ġnut rients", + "ĠTh rones", + "Ġfin ances", + "ĠIn spect", + "Ġspe cially", + "ĠRequ ired", + "ĠP TS", + "ĠViol ence", + "oint ed", + "sh ots", + "Ġex cerpt", + "co on", + "IN S", + "ĠG ri", + "Ġrecogn ised", + "We ek", + "You ng", + "Ġv om", + "is le", + "ĠCur ry", + "ĠBudd h", + "Ġnot ebook", + "Ġd urable", + "/ 
?", + "ĠG ad", + "ĠP upp", + "Ġforg ive", + "p ark", + "Ġpersonal ities", + "an alysis", + "cl amation", + "Ġelev ator", + "Ġware house", + "ĠR ole", + "un n", + "Ġillust ration", + "ĠSc an", + "Ġatmosp heric", + "Im port", + "AN C", + "rict ed", + "f u", + "01 0", + "Ġar che", + "Ġreward ed", + "akespe are", + "Ġintern ally", + "ĠR BI", + "alk er", + "Ġeleph ant", + "ow itz", + "ĠP izza", + "Ġbip artisan", + "é s", + "Ġslow ed", + "ĠSt ark", + "Ġover ride", + "OU S", + "Ġ3 20", + "undred s", + "ĠDe ck", + "ĠC ensus", + "be e", + "14 6", + "ot or", + "Ġ ip", + "Ġu b", + "oc ations", + "ĠBut ton", + "r ice", + "Ġc ripp", + "ff f", + "Ġorig inated", + "Ġoverwhel med", + "app a", + "Ġfore most", + "âĢ ij", + "ĠL EG", + "re lease", + "eat ured", + "at ches", + "Ġre ps", + "Ġl ending", + "ĠRe ference", + "ĠCl ient", + "16 5", + "vent h", + "Com plete", + "ĠPat rol", + "Ġsw orn", + "c am", + "Ġshut tle", + "ĠR alph", + "Ġh ometown", + "- ,", + "on al", + "ĠB P", + "å ı", + "Ġpersu ade", + "ĠAlex and", + "Ġcomb ines", + "Ġv ivid", + "ĠL ag", + "Ġenc oding", + "Ġsal vation", + "w en", + "ĠRec overy", + "i ya", + "Un iversity", + "ĠB iden", + "Ġbud gets", + "ĠTex ans", + "f its", + "Ġhon ored", + "Ġp ython", + "T D", + "## #", + "cl one", + "Ġbl ink", + "ĠL iquid", + "Ġunemploy ed", + "Ġcl ashes", + "ĠCoun sel", + "Ġdirect ing", + "Ġpun ct", + "ĠFal cons", + "Ġsh ark", + "ĠDam ascus", + "Ġje ans", + "Ġemb ark", + "Ġse ize", + "Ġup wards", + "2 80", + "ĠE z", + "ĠAny thing", + "Ġex otic", + "l ower", + "ĠCreat or", + "ĠU m", + "Ġsubur bs", + "ber ger", + "ĠW end", + "Ġm int", + "ĠX X", + "ĠD ro", + "Ġsuff ers", + "Ġher b", + "t ree", + "Ġfrag ile", + "Ġflood ed", + "ĠAl cohol", + "ole an", + "ny der", + "ĠK O", + "F ram", + "Ġ13 6", + "Ġow ed", + "ĠMe lee", + "ĠH ash", + "Ġwh isk", + "Ġsu do", + "r r", + "Qu ick", + "app ro", + "Ġi i", + "ĠEx amples", + "he e", + "Ġpromot es", + "per ature", + "k ar", + "ĠHon or", + "Ġs odium", + "ĠL if", + "ros so", + "intend ent", + "Ġcorrespond ent", + "F ound", + "sec ret", + "Ġident ifies", + "ag ne", + "Ġl ou", + "ĠP P", + "Ġcoinc idence", + "m ove", + "Ġmilit ia", + "Ġinf iltr", + "ĠPrim ary", + "Ġpitch ing", + "ĠI b", + "ĠGO OD", + "ãĤ ¸", + "ĠW izards", + "ir al", + "ĠVen us", + "R R", + "ĠâĢ ķ", + "ĠCase y", + "Ġsad ly", + "Ġadm ire", + "Ġembarrass ed", + "c b", + "M el", + "Ġtub es", + "Ġbeaut ifully", + "ĠQueens land", + "Bel ow", + "re z", + "qu et", + "ple asant", + "Ġ «", + "C amp", + "Ġdec isive", + "19 98", + "ĠL amb", + "ut ton", + "h n", + "ĠJ agu", + "au nder", + "ĠC ord", + "Ġcl erk", + "Ġca ffe", + "Ġwip ed", + "Ġre im", + "ĠMount ains", + "Ġimprison ed", + "Ġdevelop s", + "ĠP ra", + "Ġmodel ing", + "Any one", + "ance l", + "ĠS it", + "Ġshield s", + "Ġl awn", + "Ġcard iovascular", + "Ġdemonstr ating", + "Ġpar se", + "ĠIsrael is", + "Ġeuro s", + "14 3", + "Ġgl orious", + "ins ki", + "ec d", + "Ġcondition ing", + "Ġhel pless", + "Ġmicro sc", + "ĠHar bor", + "Ġst akes", + "Ġ2 60", + "Ġun equ", + "ĠFl oyd", + "Ġd amp", + "Ġappar atus", + "ĠLaw s", + "Ġcoun ters", + "Ġindu ce", + "at able", + "ĠAh med", + "Ġsl am", + "N ovember", + "Ġpers ist", + "Ġim minent", + "á n", + "Ġsh red", + "Ġph ases", + "ĠEd monton", + "ĠArm strong", + "ĠMe et", + "ĠK itty", + "Ñ Ģ", + "c irc", + "ĠAd ult", + "Ġa rose", + "ĠX en", + "D an", + "g ow", + "Ġsuper f", + "ĠAd mir", + "Ġend ure", + "Ġkey word", + "yr us", + "Ġy arn", + "Ġpath way", + "ĠHop kins", + "mid t", + "Ġcens orship", + "d ependent", + "Ġinstruct or", + "S ources", + "Ġto e", + "Ġball oon", + "N ob", + 
"Ġsw ear", + "ĠCast ro", + "Ġgl oss", + "ĠK avanaugh", + "Ġremark ably", + "Ph otos", + "ĠN om", + "ĠS outheast", + "y ers", + "Ġvalid ation", + "Ġcann on", + "ĠVict ory", + "ĠPier re", + "Ġcaut ious", + "Aud io", + "Ġf etch", + "ĠG ift", + "ĠH yp", + "Ġrem edy", + "Z E", + "Ġsc ent", + "Ġbe ard", + "ĠR ut", + "- \"", + "Ġpat ents", + "H y", + "Ġun just", + "Ġpot ato", + "Ġforth coming", + "Ġche f", + "ĠR ift", + "aff e", + "ĠR OM", + "ĠL aunch", + "Ġp ads", + "ĠNe o", + "Ġon set", + "Ġsquee ze", + "s afe", + "Ġpref ix", + "ĠT M", + "ĠN early", + "ĠClin ical", + "ĠM ental", + "ot iation", + "ĠUn ic", + "ant ry", + "ĠC ir", + "Ġep it", + "à ¦", + "Ġextract ed", + "verse ly", + "ri ad", + "Ġstr ains", + "Ġto ps", + "Ġpo em", + "ĠRand y", + "ĠMap le", + "TH ER", + "up iter", + "ĠSS D", + "ļ é", + "Ġun con", + "per ing", + "Ġsle pt", + "in ers", + "Ġunder water", + "ĠEv idence", + "g one", + "20 5", + "Ġhistor ians", + "Ġsynt hesis", + "Ġf rog", + "b asketball", + "Ġvibr ant", + "Ġsub ord", + "Ġ3 65", + "ĠD ial", + "Ġcooper ate", + "HA HA", + "Ġgreet ed", + "15 8", + "Ġj azz", + "Ġinto x", + "ĠWalk ing", + "Ġsuper visor", + "ĠF usion", + "ĠMer cedes", + "s end", + "H am", + "s d", + "n l", + "Ġtour s", + "ĠF IFA", + "Ġcul p", + "g d", + "30 4", + "Ġple as", + "Ġillust rates", + "ĠColomb ia", + "Ġhighlight ing", + "ĠSum mary", + "Ġexp osing", + "ĠD ru", + "Ġir ony", + "r itional", + "ĠCar roll", + "ĠEll is", + "P ict", + "ĠR apt", + "Ġad apter", + "Ġun m", + "Ġcor pse", + "Ġceleb rities", + "D en", + "at um", + "ĠAp ocalypse", + "ĠW ag", + "lin ing", + "Ġhorm ones", + "R ub", + "ĠX i", + "ĠV aults", + "20 8", + "alky rie", + "inos aur", + "Ġfeed s", + "v ity", + "Ġdefe ating", + "W ait", + "Ġemphas ize", + "ĠSteel ers", + "yr inth", + "le ys", + "ĠWhe never", + "Current ly", + "ĠCl ock", + "Ġcollect ively", + "any on", + "ĠJ P", + "Ġment ality", + "Ġdownload s", + "Ġsurround ings", + "ĠBarn es", + "Ġflags hip", + "Ġindic ators", + "Ġgra pp", + "Jan uary", + "ĠElement al", + "ĠAthen a", + "ib al", + "Ġs ights", + "Ġcap ita", + "ĠTreat y", + "Ġvo iced", + "ĠG az", + "let te", + "Ġy a", + "Ġexp ired", + "Leg end", + "H ot", + "n ature", + "Ġunst able", + "Ġ2 80", + "à º", + "Com ment", + "AL E", + "Ġquest s", + "Ġhand ler", + "n is", + "Ġvers atile", + "Ġconce al", + "enge ance", + "ĠInter active", + "Ġobs essed", + "ĠDog s", + "Ġcr acked", + "S ound", + "s v", + "ĠD ylan", + "ro ads", + "f x", + "ĠCath olics", + "ĠH ag", + "Ġsl ammed", + "Ġgl owing", + "s ale", + "Ġtiss ues", + "ĠCh i", + "ne e", + "Ġc her", + "s ic", + "ur rection", + "Ġb acon", + "ul atory", + ") .\"", + "Ġir regular", + "FOR M", + "ass ed", + "Ġintention al", + "Ġcompens ate", + "ĠSpe aking", + "ĠS ets", + "15 3", + "Ġconvent ions", + "b ands", + "em ade", + "Ġe cc", + "ĠWin ston", + "ĠAssass in", + "ĠBelg ian", + "Ġdepend ence", + "Ġnic he", + "Ġb ark", + "ĠJ azz", + "Ġdisadvant age", + "Ġgas oline", + "Ġ16 5", + "çļ Ħ", + "ess a", + "mod ule", + "ang ular", + "O Y", + "ĠTreat ment", + "it as", + "ol ation", + "ĠArn old", + "Ġfe ud", + "ĠN est", + "Ġthe atre", + "ew ater", + "Ġmin ors", + "olic y", + "ĠH aven", + "div ision", + "Ġtr unk", + "F ar", + "ĠP ull", + "Ġcapt uring", + "Ġ18 00", + "ĠTe en", + "Ġex empl", + "Ġclin ics", + "ĠB urg", + "Ġsubst it", + "Ġpay load", + "ĠL av", + "ĠT roy", + "ĠW itness", + "Ġfrag ments", + "Ġpass words", + "Ġg ospel", + "ĠG in", + "Ġten ants", + "ol ith", + "S ix", + "Pre vious", + "ĠAg es", + "ĠDar win", + "Ġbl at", + "Ġem pathy", + "sm ith", + "b ag", + "ĠE cho", + "ĠC amb", + "ĠM add", 
+ "ĠB oo", + "Ġred e", + "ĠBurn ing", + "Ġsmooth ly", + "ĠAd rian", + "ĠV ampire", + "ĠMon sters", + "ste am", + "Sty le", + "M a", + "re a", + "ĠD war", + "aly st", + "urs or", + "Ġelim ination", + "Ġcrypt o", + "ch t", + "ĠE ternal", + "âĢ¦ ]", + "ĠS orce", + "I ll", + "N ER", + "Ġu h", + "Con clusion", + "w age", + "Ġresp ir", + "Ġrem inis", + "het ical", + "Ġg y", + "Ġutil ized", + "ic idal", + "Ġ19 00", + "Ġhun ters", + "ĠSw an", + "ĠRe act", + "Ġvis itor", + "ĠThanks giving", + "30 8", + "Post s", + "Ġh ips", + "19 97", + "om ers", + "Ġkn ocking", + "ĠVeh icle", + "Ġt il", + "Ġ13 8", + "Ġm i", + "ĠInvest igation", + "ĠKen ya", + "Ġcas ino", + "Ġmot ives", + "Ġreg ain", + "re x", + "Ġweek ends", + "Ġstab bed", + "bor o", + "Ġexplo ited", + "ĠHA VE", + "ĠTe levision", + "c ock", + "Ġprepar ations", + "Ġende av", + "ĠRem ote", + "ĠM aker", + "ĠPro du", + "ĠEv an", + "Ġinform ational", + "ĠLouis ville", + "15 4", + "ĠDream s", + "Ġpl ots", + "ĠRun ner", + "Ġhur ting", + "Ġacad emy", + "ĠMont gomery", + "n m", + "ĠL anc", + "ĠAl z", + "2 10", + "el ong", + "Ġretail er", + "Ġar ising", + "Ġrebell ion", + "Ġbl onde", + "play ed", + "Ġinstrument al", + "C ross", + "Ġret ention", + "Ġtherape utic", + "Ġse as", + "Ġinfant ry", + "ĠCl int", + "Ġprompt ing", + "Ġbit ch", + "Ġst ems", + "ĠK ra", + "Ġthe sis", + "ĠB og", + "ru ed", + "Ġk ings", + "Ġcl ay", + "ific ent", + "ĠY ES", + "ĠTh ing", + "ĠCub s", + "vey ard", + "els h", + "in arily", + "ĠE y", + "ĠRoll ing", + "Ġev olving", + "Ind ia", + "Ġrecogn izes", + "Ġgrad uation", + "is ers", + "Ġfert ility", + "ĠMil an", + "Comm and", + "Ġbox ing", + "Ġ19 43", + "Ġgl uten", + "ĠEm ir", + "Ġid ol", + "Ġcon ceived", + "ĠCre ation", + "Mer it", + "udd y", + "uss ions", + "ĠLie utenant", + "iet al", + "Ġunch anged", + "ĠSc ale", + "ĠCrime a", + "ball s", + "ator ial", + "Ġdepth s", + "Ġempir ical", + "Ġtrans m", + "Ġuns afe", + "miss ible", + "com fort", + "15 6", + "Ġmechan ic", + "00 2", + "l ins", + "Ġsm oked", + "P os", + "Ġslow ing", + "Ġl av", + "Tex as", + "Ġche ating", + "ĠMet ropolitan", + "eth yl", + "Ġdiscover ing", + "as se", + "Ġpen cil", + "ĠPy ongyang", + "Ġclos et", + "ĠShe et", + "ĠEnt ry", + "ou stic", + "Ġmy st", + "er ate", + "ari at", + "Ġminer als", + "Ġmusic ian", + "ĠP ul", + "ĠM az", + "24 9", + "Ġper missions", + "Ġ iv", + "en ary", + "ick ers", + "ĠB ing", + "he a", + "en able", + "Ġgri ev", + "Ġassert ed", + "ĠColon el", + "Ġaff idav", + "w o", + "Ġse ated", + "ĠR ide", + "Ġpaint ings", + "ĠP ix", + "Ġ13 7", + "ish i", + "umb ai", + "g otten", + "ĠEar l", + "Ġin ning", + "Ġc ensus", + "Ġtrave lled", + "ĠCons ult", + "18 5", + "b ind", + "Ġsimpl icity", + "Ġoverlook ed", + "ĠHelp ful", + "Ġmon key", + "Ġoverwhelming ly", + "Bl ood", + "ĠFl int", + "ĠJ ama", + "ĠPres ent", + "ĠR age", + "ĠT A", + "pt ive", + "Ġturn out", + "w ald", + "ĠD olphins", + "ĠV PN", + "Ġon ion", + "Ġcraft ing", + "m ma", + "ĠMerc ury", + "Ġarr ange", + "Ġalert s", + "ĠO T", + "zb ollah", + "Ġg ases", + "ĠRichards on", + "s al", + "l ar", + "Ġfro st", + "Ġlower ing", + "Ġacc laim", + "Ġstart ups", + "ĠG ain", + "ess ment", + "Ġguard ian", + "äº º", + "ĠP ie", + "ĠL inks", + "Ġmer its", + "Ġaw ake", + "Ġparent al", + "Ġexceed s", + "Ġid le", + "ĠPil ot", + "Ġe Bay", + "ĠAc cept", + "ipe g", + "C am", + "ĠK ot", + "Ġtrad ers", + "olit ics", + "unk er", + "ĠP ale", + "os i", + "an mar", + "Ġ19 47", + "ĠF ell", + "est ial", + "it ating", + "G F", + "ĠS r", + "if ted", + "Ġconnect or", + "ĠB one", + "ill es", + "2 60", + "h ma", + "Ġoverl ap", + "ĠGit 
Hub", + "Ġclean er", + "ĠBapt ist", + "ĠW AS", + "Ġlung s", + "Ñ ģ", + "ĠB UT", + "Ġc ite", + "Ġpit ched", + "reat ment", + "Ġtro phies", + "ĠN u", + "38 6", + "ĠPr ide", + "Ġattend ees", + "[ ]", + "17 9", + "Ġspat ial", + "Ġpri zes", + "ĠRel igion", + "Ġshow case", + "ĠC ategory", + "vid ia", + "T arget", + "Pro perty", + "? ,", + "Ġf usion", + "p ie", + "ĠU CLA", + "Ġsound track", + "Ġprin cess", + "ĠC aval", + "sh ould", + "Ġlim bs", + "Back ground", + "Ġlone ly", + "Ġc ores", + "ĠT ail", + "she et", + "Ġ13 2", + "R a", + "ãĤ «", + "ĠB olt", + "Ġbook ed", + "Ġadmin ister", + "Ġequ als", + "w y", + "Ġobserv ing", + "ĠBar on", + "ĠAd obe", + "Ġv irgin", + "ĠSocial ist", + "M ove", + "gh azi", + "ĠLind a", + "2 12", + "Ġbre wing", + "Ġmerch ants", + "bur se", + "Ġdiv or", + "Ġmet als", + "ĠN er", + "Ġsum s", + "ĠEn emy", + "Ġen vision", + "Ġgrant ing", + "ĠH oney", + "ĠSk yrim", + "Ġsoc io", + "gr aded", + "Ġselect ive", + "W ASHINGTON", + "Ġ19 48", + "ĠSir ius", + "ĠG ross", + "act ivity", + "ĠI van", + "Ġfur ious", + "BS D", + "ĠPre vious", + "Ġrespons ive", + "Ġchar itable", + "Ġle aning", + "ĠP ew", + "Ġviol ates", + "\\\\\\\\ \\\\\\\\", + "ĠCom ing", + "w ire", + "Ġpo et", + "Ġres olutions", + "comm and", + "ĠPortug uese", + "Ġnick name", + "Ġde af", + "Feb ruary", + "Ġrecogn ise", + "Ġentire ty", + "Ġseason al", + "pl aced", + "ĠTe legraph", + "Ġmicro phone", + "our ing", + "Ġgr ains", + "Ġgovern ed", + "Ġpost p", + "ĠW aters", + "in ement", + "Ġund ocumented", + "ĠCom cast", + "Ġf ox", + "Ġassault s", + "re on", + "man y", + "ĠJen kins", + "ĠAny way", + "Ġassess ments", + "Ġdown s", + "ĠM ouse", + "Ġsuper b", + "k t", + "ĠD ow", + "Ġtax ation", + "4 01", + "Ġsm iles", + "Ġundert aken", + "Ġex h", + "Ġenthusi astic", + "Ġtw ent", + "Ġgovernment al", + "Ġautonom y", + "ĠTechn ologies", + "ĠCh ain", + "Ġpreval ent", + "f b", + "Ġnic otine", + "og ram", + "j ob", + "Ġawa iting", + "ĠMen u", + "Ġdep uties", + "k ov", + "ish ops", + "But ton", + "ĠShan ghai", + "Ġdies el", + "ĠD uck", + "R yan", + "ĠPC s", + "N F", + "j ury", + "ent e", + "Ġinacc urate", + "edd y", + "Wh atever", + "Ġshow c", + "ĠN ad", + "od us", + "et r", + "Ġplaint iffs", + "ĠW OR", + "ĠAss ange", + "Ġpriv at", + "Ġpremium s", + "Ġt am", + "UR L", + "Ġel ites", + "ĠR anger", + "otten ham", + "ĠH off", + "ĠAt hens", + "Ġdefin ite", + "Ġs ighed", + "Ġeven ly", + "2 11", + "ĠAm ber", + "ak ia", + "Ġmail ing", + "Ġcr ashing", + "ĠConfeder ate", + "ru gged", + "W al", + "ĠDep ths", + "Ġjuven ile", + "Ġreact or", + "Introdu ction", + "ĠDel uxe", + "19 95", + "ĠS anchez", + "ĠM ead", + "iv able", + ": -", + "ĠPlan ning", + "ĠT rap", + "qu in", + "ĠProt ect", + "ve red", + "In formation", + "Ġkid ney", + "inn amon", + "l as", + "Ġpolic ing", + "Ġtoler ate", + "ĠQ i", + "Ġbi ased", + "F ort", + "ĠK i", + "s ave", + "Ġprivile ged", + "Ġbe asts", + "ĠGl as", + "ĠC inem", + "Ġcome back", + "Sund ay", + "Ġext inction", + "h ops", + "Ġtrans mit", + "Ġdoub les", + "ĠFl at", + "16 7", + "Ġdis puted", + "Ġinjust ice", + "f oo", + "V ict", + "role um", + "ĠJul ie", + "Con text", + "ĠR arity", + "iss ue", + "Comp onent", + "Ġcounsel ing", + "an ne", + "d ark", + "Ġobject ions", + "u ilt", + "Ġg ast", + "Ġpl ac", + "Ġun used", + "ãĥ ĩ", + "ĠT rial", + "ĠJ as", + "hed ral", + "ob b", + "Ġtempor al", + "ĠPR O", + "ĠN W", + "ĠAnn iversary", + "L arge", + "Ġther m", + "Ġd avid", + "Ġsystem ic", + "ĠSh ir", + "m ut", + "ĠNe pt", + "add ress", + "Ġscan ning", + "Ġunderstand able", + "Ġcan vas", + "C at", + "ĠZ oo", + "Ġang els", + "L O", + 
"ĠStat ement", + "ĠS ig", + "ov able", + "ĠA way", + "sh aring", + "ocr ats", + "st ated", + "Ġweigh ing", + "N or", + "w ild", + "B ey", + "Ġaston ishing", + "ĠReyn olds", + "Ġop ener", + "Ġtrain er", + "Ġsurg ical", + "p n", + "Ġadjust ing", + "whe el", + "Ġf rown", + "erv ative", + "Ġsusp end", + "With in", + "te in", + "Ġobst acle", + "Ġliber ties", + "ym es", + "Ġur anium", + "ans om", + "an ol", + "ub a", + "ĠL oss", + "Ġa rous", + "ĠHend erson", + "W ow", + "s pl", + "c ur", + "Ġ Ń", + "Ġtheir s", + "Dam age", + "Ġdownload ing", + "Ġdisc ern", + "ĠSt o", + "ĠFl a", + "Ġh ath", + "ĠA j", + "Ġun pleasant", + "Europe an", + "exp ensive", + "Ġscreens hot", + "ĠU V", + "Ġall ied", + "ĠPers ian", + "Ġmonop oly", + "Ġat om", + "ĠReds kins", + "\"> <", + "Ġcan cell", + "Ġcinem a", + "13 1", + "f air", + "ĠAlf red", + "Ġd uck", + "arg s", + "22 3", + "ĠIS I", + "Ġsign aling", + "in ar", + "Ġlaugh s", + "Ġfor wards", + "Ġreck less", + "Ġlisten ers", + "at ivity", + "Ġvast ly", + "n ant", + "L ess", + "ĠHun ting", + "ĠScient ific", + "IT ED", + "Ġkn ight", + "ĠH TC", + "us a", + "t mp", + "Ġr ude", + "ĠLegend ary", + "Ġar ises", + "B ad", + "ĠCl aim", + "pe g", + "Ġreal ities", + "Th ink", + "Ġ °", + "Ġro de", + "Ġstri ve", + "Ġan ecd", + "Ġshort s", + "Ġhypot hes", + "Ġcoord inated", + "ĠGand hi", + "ĠF PS", + "R ED", + "Ġsuscept ible", + "Ġshr ink", + "ĠCh art", + "Hel p", + "Ġ ion", + "de ep", + "rib es", + "ĠK ai", + "ĠCustom er", + "Sum mary", + "Ġc ough", + "w ife", + "Ġl end", + "Ġposition ing", + "Ġlot tery", + "ĠC anyon", + "Ġf ade", + "Ġbron ze", + "ĠKenn y", + "Ġbo asts", + "ĠEnh anced", + "rec ord", + "Ġemer gence", + "Ġa kin", + "ĠB ert", + "it ous", + "âĸ ij", + "Ġst ip", + "Ġexch anged", + "om ore", + "als h", + "Ġreserv oir", + "Ġstand point", + "W M", + "Ġiniti ate", + "Ġdec ay", + "Ġbrew ery", + "Ġter ribly", + "Ġmort al", + "lev ard", + "Ġrev is", + "N I", + "el o", + "Ġconf ess", + "ĠMS NBC", + "Ġsub missions", + "Cont roller", + "Ġ20 2", + "ĠR uth", + "} );", + "ĠAz ure", + "Ġ .\"", + "20 6", + "ĠMarket ing", + "Ġl aund", + "ien cies", + "Ġrenown ed", + "ĠT rou", + "ĠN GO", + "ble ms", + "Ġterr ified", + "Ġwar ns", + "Ġper t", + "Ġuns ure", + "4 80", + "ale z", + "ult z", + "ĠOut side", + "Ġst yl", + "ĠUnder ground", + "Ġp anc", + "Ġd ictionary", + "Ġf oe", + "rim inal", + "ĠNor wegian", + "Ġj ailed", + "Ġm aternal", + "é e", + "ĠLu cy", + "c op", + "Ch o", + "Ġuns igned", + "ĠZe lda", + "ĠIns ider", + "ĠContin ued", + "Ġ13 3", + "ĠNar uto", + "ĠMajor ity", + "16 9", + "ĠW o", + "ãĤ ĵ", + "Ġpast or", + "Ġinform al", + "Ð ½", + "an throp", + "jo in", + "ãģ Ĺ", + "it ational", + "N P", + "ĠWrit ing", + "f n", + "ĠB ever", + "19 5", + "Ġy elling", + "Ġdr astically", + "Ġe ject", + "Ġne ut", + "Ġth rive", + "ĠFre qu", + "ou x", + "Ġpossess es", + "ĠSen ators", + "ĠD ES", + "ĠSh akespeare", + "ĠFran co", + "ĠL B", + "uch i", + "Ġinc arn", + "Ġfound ers", + "F unction", + "Ġbright ness", + "ĠB T", + "Ġwh ale", + "ĠThe ater", + "m ass", + "ĠD oll", + "S omething", + "Ġecho ed", + "ĠHe x", + "c rit", + "af ia", + "Ġgodd ess", + "Ġele ven", + "ĠPre view", + "ĠAur ora", + "Ġ4 01", + "uls ive", + "ĠLog an", + "in burgh", + "ĠCent ers", + "ĠON LY", + "ĠA id", + "Ġparad ox", + "Ġh urd", + "ĠL C", + "D ue", + "c ourt", + "Ġoff ended", + "Ġeval uating", + "ĠMatthew s", + "Ġto mb", + "Ġpay roll", + "Ġextra ction", + "ĠH ands", + "if i", + "Ġsuper natural", + "ĠCOM M", + "] =", + "dog s", + "Ġ5 12", + "ĠMe eting", + "Rich ard", + "ĠMax imum", + "Ġide als", + "Th ings", + "m and", + "ĠReg 
ardless", + "Ġhum ili", + "b uffer", + "L ittle", + "ĠD ani", + "ĠN ak", + "Ġliber ation", + "ĠA be", + "ĠO L", + "Ġstuff ed", + "ac a", + "ind a", + "raph ic", + "Ġmos qu", + "Ġcampaign ing", + "Ġoccup y", + "S qu", + "r ina", + "ĠW el", + "ĠV S", + "Ġphys ic", + "Ġp uls", + "r int", + "oad ed", + "ET F", + "ĠArch ives", + "Ġven ues", + "h ner", + "ĠTur bo", + "Ġl ust", + "Ġappeal ed", + "que z", + "il ib", + "ĠTim othy", + "Ġo mn", + "d ro", + "Ġobs ession", + "ĠSav age", + "19 96", + "Gl obal", + "J es", + "2 14", + "Ġsl iding", + "Ġdisapp ro", + "ĠMag ical", + "Ġvolunt arily", + "g b", + "ane y", + "Ġprop het", + "ĠRe in", + "ĠJul ia", + "ĠW orth", + "aur us", + "Ġb ounds", + "ie u", + ")) )", + "Ġcro re", + "ĠCitiz en", + "S ky", + "Ġcolumn ist", + "Ġseek ers", + "ond o", + "IS A", + "ĠL ength", + "Ġnost alg", + "Ġnew com", + "Ġdet rim", + "ent ric", + "3 75", + "ĠG E", + "Ġaut op", + "Ġacadem ics", + "App Data", + "ĠS hen", + "Ġid iot", + "ĠTrans it", + "Ġteasp oon", + "W il", + "K O", + "ĠCom edy", + "> ,", + "Ġpop ulated", + "W D", + "Ġp igs", + "ĠO culus", + "Ġsymp athetic", + "Ġmar athon", + "19 8", + "Ġseiz ure", + "s ided", + "Ġd op", + "irt ual", + "L and", + "ĠFl oor", + "osa urs", + "... ]", + "Ġl os", + "Ġsubsid iary", + "E Y", + "ĠPart s", + "ĠSt ef", + "ĠJud iciary", + "Ġ13 4", + "Ġmir rors", + "Ġk et", + "t imes", + "Ġneuro log", + "Ġc av", + "ĠGu est", + "Ġtum or", + "sc ill", + "ĠLl oyd", + "E st", + "Ġcle arer", + "Ġstere otypes", + "Ġd ur", + "not hing", + "Red dit", + "Ġnegoti ated", + "---------------- --------", + "23 5", + "Ġfl own", + "ĠSe oul", + "ĠRes ident", + "ĠS CH", + "Ġdisappear ance", + "ĠV ince", + "g rown", + "Ġgrab s", + "r il", + "ĠInf inite", + "ĠTw enty", + "Ġpedest rian", + "Ġjer sey", + "ĠF ur", + "ĠInf inity", + "ĠEll iott", + "Ġment or", + "Ġmor ally", + "Ġob ey", + "sec ure", + "iff e", + "Ġantib iotics", + "ang led", + "ĠFre eman", + "ĠIntrodu ction", + "J un", + "Ġm arsh", + "ic ans", + "ĠEV ENTS", + "och ond", + "W all", + "icult y", + "Ġmisdem eanor", + "Ġl y", + "Th omas", + "ĠRes olution", + "Ġanim ations", + "ĠD ry", + "Ġinter course", + "ĠNew castle", + "ĠH og", + "ĠEqu ipment", + "17 7", + "Ġterrit orial", + "Ġarch ives", + "20 3", + "Fil ter", + "ĠMun ich", + "Ġcommand ed", + "ĠW and", + "Ġpit ches", + "ĠCro at", + "Ġrat ios", + "ĠM its", + "Ġaccum ulated", + "ĠSpecific ally", + "Ġgentle man", + "acer b", + "Ġp enn", + "Ġa ka", + "ĠF uk", + "Ġinterven e", + "ĠRef uge", + "ĠAlz heimer", + "Ġsuccess ion", + "oh an", + "d oes", + "L ord", + "Ġsepar at", + "Ġcorrespond ence", + "Ġsh iny", + "P rior", + "Ġs ulf", + "Ġmiser able", + "Ġded ication", + "( ).", + "Ġspecial ists", + "Ġdefect s", + "ĠC ult", + "ĠX ia", + "Ġje opard", + "ĠO re", + "Ab ility", + "Ġle ar", + "Ġamb itions", + "ĠB MI", + "ĠArab s", + "Ġ19 42", + "Ġpres ervation", + "ific ate", + "Ġash amed", + "l oss", + "ĠRest aur", + "Ġrese mble", + "Ġen rich", + "ĠK N", + "ĠCl an", + "fl oat", + "Ġplay able", + "IT T", + "Ġharm ony", + "arr ison", + "ĠWe instein", + "w ere", + "Ġpoison ing", + "ĠCom put", + "ĠWord Press", + "m ajor", + "ĠVal ve", + "F an", + "ĠTh row", + "ĠRom ans", + "ĠDep ression", + "ad os", + "Ġtort ured", + "Ġbal ancing", + "bott om", + "Ġacqu iring", + "ĠMon te", + "ard i", + "Ġa ura", + "Ġ# #", + "ĠStand ing", + "ĠAtl as", + "C F", + "Ġintr ins", + "ĠBen ghazi", + "Ġcamp ing", + "Ġt apped", + "bl ade", + "st rous", + "ĠR abb", + "ĠW ritten", + "t ip", + "ĠNe igh", + "ster dam", + "ĠAll ow", + "ĠHe aling", + "ĠR hod", + "n um", + "Ġcaffe ine", + "ĠPer 
cent", + "Ġbo o", + "Ġapp les", + "30 5", + "Ġwel coming", + "Ġappl aud", + "Ġa usterity", + " ±", + "ĠRe ality", + "ef e", + "å ®", + "Ġsu cks", + "Ġtab s", + "ĠPay Pal", + "Ġback pack", + "Ġgif ted", + "abul ary", + "ĠSc out", + "ir teen", + "Ġch in", + "Ġo mitted", + "Ġnegative ly", + "Ġaccess ing", + "ĠE arn", + "Ġambul ance", + "Ġhead phones", + "Ġ20 5", + "ĠRef resh", + "p resident", + "ĠKit chen", + "ĠEnt ered", + "ĠS nyder", + "00 5", + "om ical", + "Ġborrow ed", + "ĠN em", + "Ġav iation", + "Ġst all", + "rim ination", + "Ġuniform s", + "it ime", + "ĠSim mons", + "ener gy", + "ab lished", + "y y", + "qual ified", + "Ġrall ies", + "ĠSt uart", + "fl ight", + "Ġgang s", + "r ag", + "Ġv ault", + "lu x", + "ĠCom par", + "Ġdesign ation", + "20 9", + "ĠJ os", + "d ollar", + "z ero", + "Ġwell s", + "30 3", + "Ġconstitu ents", + "Ġhe ck", + "Ġc ows", + "Ġcommand ers", + "Ġdifferent ial", + "ĠC atherine", + "29 9", + "Ġval ve", + "Ġbr ace", + "Ġperspect ives", + "c ert", + "f act", + "icular ly", + "ĠMc N", + "pl anes", + "Ġint ric", + "Ġpe as", + "ov an", + "Ġtoss ed", + "ret ch", + "ĠL opez", + "Ġunf amiliar", + "de ath", + "ĠA part", + "ĠCh ang", + "Ġrelie ved", + "rop he", + "Ġair ports", + "Ġfre ak", + "ut il", + "M ill", + "ĠCh in", + "ĠOw en", + "m ale", + "ĠBro ken", + "ĠWind s", + "ro b", + "r ising", + "Ġfire fighters", + "Ġauthor itarian", + "Ġ14 8", + "Bit coin", + "ex ternal", + "Ġbrow sers", + "iche ver", + "or ian", + "Ġun b", + "Ġpo ke", + "ĠZ ot", + "M id", + "ĠPop ular", + "Ġco vert", + "Ġcont ributes", + "Ġ6 50", + "Ġcont ention", + "G ate", + "Ġcons oles", + "Ġchrom os", + "ĠI X", + "Ġvis ually", + "ĠE isen", + "Ġjewel ry", + "Ġdeleg ation", + "Ġacceler ate", + "ĠR iley", + "Ġsl ope", + "Ġind oor", + "it ially", + "Ġhuge ly", + "Ġtun nels", + "Ġfin ed", + "Ġdirect ive", + "Ġfore head", + "ustom ed", + "Ġsk ate", + "Mus ic", + "g as", + "Ġrecogn izing", + "am bo", + "Ġover weight", + "ĠGr ade", + "Ù Ĭ", + "Ġsound ing", + "Ġlock ing", + "ĠR EM", + "St ore", + "Ġexc av", + "ĠLike wise", + "ĠL ights", + "Ġel bow", + "ĠSupp ly", + "w ic", + "Ġhands ome", + "19 94", + "C oll", + "Ġadequ ately", + "ĠAssoci ate", + "Ġstri ps", + "Ġcrack down", + "Ġmar vel", + "ĠK un", + "Ġpass ages", + "@@ @@", + "ĠT all", + "Ġthought ful", + "names e", + "Ġprost itution", + "bus iness", + "Ġball istic", + "person al", + "c ig", + "iz ational", + "R ound", + "ĠÂłĠÂł ĠÂłĠÂł", + "ĠCole man", + "Ġadm itting", + "ĠPl ug", + "Ġbit coins", + "ĠSu z", + "Ġfair ness", + "Ġsupp lier", + "Ġcatast rophic", + "ĠHel en", + "o qu", + "M arc", + "ĠArt icles", + "g ie", + "Ġend angered", + "Ġdest iny", + "ĠVol t", + "ol ia", + "ax is", + "Ġche at", + "Ġun ified", + "IC O", + "qu ote", + "30 2", + "ĠS ed", + "Ġsupp ression", + "Ġanaly zing", + "Ġsqu at", + "Ġfig uring", + "Ġcoordin ates", + "Ġch unks", + "Ġ19 46", + "Ġsub p", + "Ġw iki", + "ĠFor bes", + "ĠJ upiter", + "ĠE rik", + "im er", + "ĠCom mercial", + "\\ )", + "Ġlegitim acy", + "Ġd ental", + "ĠMe an", + "Ġdefic its", + "5 50", + "Orig inally", + "ĠHor ror", + "Ġcontam ination", + "ll ah", + "Ġconf isc", + "ĠCl are", + "T B", + "ĠF ailed", + "an ed", + "Ġrul er", + "ĠCont roller", + "Ġfemin ists", + "F ix", + "g ay", + "20 7", + "Ġr abbit", + "Th ird", + "ownt own", + "Ġgl ue", + "Ġvol atile", + "Ġsh ining", + "Ġf oll", + "Ġimp aired", + "Ġsup ers", + "æ Ī", + "Ġcl utch", + "ļé ĨĴ", + "Ġpro let", + "Ġ( !", + "Ġy elled", + "ĠK iev", + "ĠEr n", + "ĠSh ock", + "K B", + "Ġsit uated", + "qu ery", + "ĠN as", + "Ġan nex", + "char acter", + "ĠHol iday", + "Ġautom 
ation", + "ĠJ ill", + "ĠRem astered", + "Ġl inem", + "Ġwild erness", + "ĠHor izon", + "ĠGu inea", + "A Z", + "Ġmain land", + "Ġsec recy", + "LE ASE", + "Ġp unk", + "ĠProv ince", + "( ),", + "Spe ed", + "Ġhand ing", + "ĠSeb ast", + "S ir", + "r ase", + "Ġj ournals", + "Ġcon gest", + "ĠT ut", + "ir rel", + "Ġschizophren ia", + "Ġmis ogyn", + "health y", + "I ron", + "Ġreact ed", + "- $", + "25 2", + "Ġpl ural", + "Ġpl um", + "Ġbarg ain", + "Ġground ed", + "f inder", + "Ġdis se", + "ĠL az", + "O OD", + "Ġat roc", + "F actory", + "Ġmin ions", + "Ġo ri", + "ĠB rave", + "ĠP RE", + "ĠMy anmar", + "ĠH od", + "Ġexped ition", + "Ġexpl ode", + "ĠCo ord", + "Ġext r", + "ĠB rief", + "ĠAD HD", + "Ġhard core", + "feed ing", + "Ġd ile", + "ĠF ruit", + "Ġvacc ination", + "ĠM ao", + "osp here", + "Ġcont ests", + "- |", + "Ġf ren", + "isp here", + "R om", + "ĠSh arp", + "ĠTre nd", + "Ġdis connect", + "âĢ¢ âĢ¢", + "Ġper secution", + "Ear th", + "Ġhealth ier", + "38 4", + "Ġc ob", + "ĠTr inity", + "OW S", + "AN N", + "Ġspecial ty", + "Ġg ru", + "Ġcooper ative", + "wh y", + "Start ing", + "ĠIss ues", + "st re", + "ens or", + "Ġ18 5", + "Ad v", + "! ?", + "ĠRe vel", + "em ia", + "ĠH ulk", + "Ġcelebr ations", + "ĠS ou", + "ra ud", + "ĠKle in", + "Ġun real", + "con text", + "Ġpartners hips", + "Ġadop ting", + "t ical", + "Ġspl ash", + "ĠHe zbollah", + "c ategory", + "cycl op", + "xt on", + "ĠD ot", + "urd y", + "t z", + "Ġenvelop e", + "ĠN L", + "â ķ", + "Ġwhere in", + "Spe c", + "18 4", + "Ġte lev", + "al iation", + "Ġmyth s", + "å °", + "Ġrig orous", + "Ġcommun icating", + "Ġobser ver", + "Ġre he", + "ĠW ash", + "Ġapolog ized", + "ĠT in", + "Ġexpend itures", + "work ers", + "d ocument", + "Ġhes itate", + "ĠLen in", + "Ġunpredict able", + "Ġrenew al", + "cl er", + "ok ia", + "ĠCON T", + "Ġpost season", + "Tok ens", + "Ġex acerb", + "Ġbet ting", + "Ġ14 7", + "Ġelev ation", + "W ood", + "ĠSol omon", + "19 4", + "00 4", + "out put", + "Ġredu nd", + "ĠM umbai", + "Ġp H", + "Ġreprodu ce", + "ĠD uration", + "MA X", + "Ġb og", + "C BS", + "ĠBal ance", + "ĠS gt", + "ĠRec ent", + "Ġc d", + "Ġpo pped", + "Ġincomp et", + "pro p", + "ay an", + "g uy", + "Pac ific", + "Ġty r", + "Ġ{ {", + "ĠMy stic", + "ĠD ana", + "Ġmast urb", + "Ġge ometry", + "à ¢", + "ĠCor rect", + "Ġtraject ory", + "Ġdistract ed", + "Ġf oo", + "ĠW elsh", + "L uc", + "m ith", + "Ġrug by", + "Ġrespir atory", + "Ġtri angle", + "Ġ2 15", + "Ġunder graduate", + "ĠSuper ior", + "ch anging", + "_ -", + "Ġright ly", + "Ġrefere e", + "Ġluc rative", + "Ġun authorized", + "Ġresemb les", + "ĠGN U", + "ĠDer by", + "Ġpath ways", + "ĠL ed", + "Ġend urance", + "Ġst int", + "Ġcollect or", + "F ast", + "Ġd ots", + "Ġnational s", + "ĠSec urities", + "Ġwh ip", + "Par am", + "Ġlearn s", + "M agic", + "Ġdetail ing", + "m oon", + "Ġbroadcast ing", + "Ġb aked", + "26 5", + "hol m", + "ĠS ah", + "ĠHus sein", + "ĠCourt esy", + "17 4", + "Ġ14 6", + "Ġge ographic", + "pe ace", + "Ġjud ging", + "ĠS tern", + "B ur", + "Ġstory line", + "G un", + "ĠSt ick", + "24 5", + "30 7", + "ãĤ´ ãĥ³", + "ĠAdminist rator", + "Ġbur nt", + "Ġp ave", + "ch oes", + "Ex ec", + "Ġcamp uses", + "Res ult", + "Ġmut ations", + "ĠCh arter", + "Ġcapt ures", + "Ġcomp ares", + "Ġbad ge", + "S cient", + "Ġer ad", + "ier y", + "o i", + "ett es", + "ĠE state", + "Ġst rap", + "Ġproud ly", + "Ġf ried", + "Ġwithd rawn", + "ĠV oy", + "ph ony", + "It ems", + "ĠP ierce", + "b ard", + "Ġann otation", + "ant on", + "ill on", + "Im pro", + "... 
)", + "Ġhapp ier", + "---- --", + "ad just", + "Ġstaff ers", + "Ġactiv ism", + "Ġper f", + "Ġal right", + "N eed", + "Ġcomm ence", + "Ġopio id", + "ĠAm anda", + "E s", + "ĠP ars", + "ĠK aw", + "W orks", + "24 8", + "Ġind o", + "t c", + "end ant", + "ĠM oto", + "Ġlegal ization", + "OT E", + "Ġtask ed", + "Ġt sp", + "ĠACT IONS", + "16 6", + "Ġrefres hing", + "ĠN R", + "ĠPere z", + "Ġinfring ement", + "S Y", + "List en", + "in ning", + "k u", + "Ġrot ate", + "pro gram", + "ar ah", + "Des ign", + "Ġ( £", + "Ġst oring", + "Ġwar rants", + "Ġjud gement", + "ĠB rist", + "us ually", + "ph oto", + "ĠR an", + "ĠP ine", + "Ġoutrage ous", + "ĠValent ine", + "lu ence", + "ĠEvery body", + "Al tern", + "Ġrele vance", + "Ġtermin ated", + "Ġd essert", + "Ġfulf illed", + "Ġprosecut ed", + "ĠW ords", + "Ġm igrant", + "Ġcultiv ation", + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ", + "idel ity", + "ĠV ern", + "ĠLog in", + "Ġmetaph or", + "ĠT ip", + "Ġrecru its", + "ĠP ig", + "rib ing", + "Ġenthusi asts", + "ex per", + "Ġfright ening", + "ĠH air", + "ans on", + "str ate", + "Ġh i", + "He ight", + "Ġown ing", + "n one", + "Ġdis like", + "Ġkn ives", + "pher d", + "Ġloud ly", + "ĠAP Is", + "Dis play", + "ĠL ac", + "ĠUS S", + "ab l", + "ver ages", + "J ew", + "Ġ17 2", + "ĠHist orical", + "at oon", + "ĠPhys ics", + "in tern", + "Ġwarm th", + "Ġto pp", + "D M", + "Ġgun man", + "Ġem peror", + "od i", + "ãĥ £", + "in atory", + "ĠR ib", + "Ġ13 1", + "ĠSat urn", + "ĠSh ining", + "Ġw aking", + "Qu otes", + "Ġcomed ian", + "en berg", + " ½", + "Ġbelie vers", + "Ġpaper work", + "c ustom", + "Ġle v", + "Ġl ament", + "Ġpour ing", + "22 2", + "p olitical", + "ĠSupp lement", + "m aid", + "Ġcruel ty", + "Ġt read", + "ys ics", + "A w", + "rit es", + "Ġmod ifier", + "ĠP osition", + "Ad am", + "l b", + "ub s", + "Ġimper fect", + "Ġcl usters", + "ĠEngine er", + "ĠC herry", + "Ġinaug uration", + "ĠS au", + "Ġembod iment", + "ĠUn cle", + "Ġover r", + "Ġexplos ions", + "c ule", + "ĠPrinc eton", + "ĠAndre a", + "Ġincorrect ly", + "Ġearn est", + "Ġpil gr", + "ĠS print", + "Ġslee ve", + "Ġhe ars", + "ĠAm azing", + "Ġbrow sing", + "ag in", + "Ġhom eland", + "Ġha w", + "Ġd iving", + "ist ered", + "17 8", + "Ġbarg aining", + "ĠArc ade", + "Ġdeleg ate", + "ters on", + "................................ 
................................", + "ĠJackson ville", + "27 5", + "Ġst agn", + "Ġad am", + "ĠSher man", + "C B", + "Ġsub urb", + "ĠFood s", + "Ġconver ting", + "ĠAr ist", + "Ġch ambers", + "l ove", + "Ġam ino", + "ĠG an", + "Ġmad ness", + "m c", + "ĠUS E", + "def ined", + "Ġul tr", + "ind ust", + "Ġw olves", + "l ance", + "Add itionally", + "Ġcr acks", + "as ia", + "ĠRe ason", + "ĠP ump", + "Ġaccident al", + "ĠL aser", + "ĠR id", + "Ġinitial ized", + "ell i", + "Ġun named", + "Ġn oun", + "ĠPass ed", + "Ġhost age", + "ĠEth iop", + "sh irts", + "Ġun rel", + "ĠEmb assy", + "Ġ19 41", + "Ġat oms", + "Ġpur ported", + "16 4", + "ĠF i", + "Ġgall ons", + "ĠMon ica", + "Ġp g", + "en ment", + "Ġsort ed", + "ĠG ospel", + "Ġhe ights", + "Ġtr aced", + "Ġunder going", + "She ll", + "Ġs acks", + "Ġproport ions", + "Ġhall uc", + "F ont", + "ac et", + "Ġwar mer", + "ĠIN TER", + "Ġgrab bing", + "Pl ug", + "Ġreal ization", + "ĠBur ke", + "Ġen chant", + "AT ER", + "ĠSe ed", + "Ġabund ant", + "F M", + "Ġc ivic", + "V s", + "is i", + "Ġv ow", + "Ġre per", + "ĠPartners hip", + "Ġpenet ration", + "Ġax e", + "Ġsh attered", + "ĠZ ombies", + "Ġv inyl", + "ĠAl ert", + "e on", + "Ġoblig ed", + "ĠIll ust", + "ĠPl aza", + "ĠFront ier", + "Ġdavid jl", + "ĠSer ial", + "ĠH av", + "ĠNut rition", + "B i", + "Ġâĸ Ī", + "ĠJ ays", + "lin ux", + "Ġhur ry", + "Ġv oy", + "Ġhop eless", + "ĠSte alth", + "Ġ ãģ", + "ess ors", + "tt le", + "b org", + "ĠSaf ari", + "f ell", + "Ġw ary", + "d ue", + "ĠAb ove", + "H a", + "E LL", + "Ġnot or", + "ĠW on", + "T oo", + "Ġoccup ations", + "Ġposs essions", + "Ġinv iting", + "Ġpred ators", + "Ġacceler ated", + "Ġ15 7", + "uter te", + "ĠC ube", + "e ast", + "acc ount", + "G ive", + "Ġtrans plant", + "red ients", + "id able", + "Ġscreens hots", + "ĠG und", + "ĠF S", + "Ġtravel ers", + "Ġsens ory", + "ĠF iat", + "ĠRock ets", + "İ ĭ", + "_ {", + "F riend", + "Ġchar ming", + "AL S", + "Ġenjoy ment", + "m ph", + "Ġ5 000", + "ĠRE G", + "Ù Ĩ", + "b ia", + "Ġcomp ilation", + "ro st", + "ĠV P", + "ĠSch ne", + "201 9", + "Ġcop ying", + "M ORE", + "ĠFl ore", + "f alls", + "2 15", + "t otal", + "Ġdis ciples", + "d ouble", + "Ġexceed ing", + "Ġsm ashed", + "Ġconcept ual", + "ĠRom ania", + "ĠB rent", + "ĠI CE", + "ĠT ou", + "Ġg rap", + "Ġn ails", + "18 9", + "ãĥ ĺ", + "Ġproc ure", + "e ur", + "Ġconfir ming", + "ĠC ec", + "aw i", + "ĠEd en", + "Ġn g", + "Ġengine ered", + "at ics", + "Ġhook ed", + "Ġdisgust ing", + "ĠMur der", + "ãĤ ¿", + "L ibrary", + "Ġ16 8", + "Al most", + "hem atic", + "Men u", + "ĠNot re", + "ĠJ ur", + "Ġkidn apped", + "Ġhack er", + "ĠJ ade", + "Ġcreep y", + "Ġdraw ings", + "ĠSpons or", + "Ġcycl ists", + "ĠGob lin", + "Ġoptim ized", + "Ġst aged", + "ĠMc D", + "bet ween", + "A ge", + "en o", + "S ex", + "ĠW ide", + "n ings", + "av is", + "Ġincap able", + "ĠK ob", + "Ġreward ing", + "ĠL one", + "oles cent", + "Ġcontract ed", + "Ġstick y", + "J ose", + "B all", + "f est", + "ĠIn put", + "ĠRec ently", + "Ġto mat", + "squ are", + "App lication", + "Ġnit rogen", + "Ġdupl icate", + "ĠRec on", + "ĠD ear", + "L ondon", + "Ġint ra", + "Ġd ock", + "Ġout reach", + "ĠM illion", + "Ġmamm als", + "am pton", + "V AL", + "Ġsn aps", + "Ġd os", + "ĠWh ole", + "ĠRead y", + "T ry", + "ĠWinn ipeg", + "ear ance", + "Ġinc urred", + "ren ched", + "ĠNS W", + "il ot", + "rain e", + "Ġc ube", + "g ot", + "Ġrun way", + "etermin ed", + "ĠHaw ks", + "Ġsurviv or", + "ĠW ish", + "ĠD in", + "ĠDE F", + "ĠV ault", + "18 7", + "Ġmush rooms", + "Ġcris p", + "be y", + "ĠDisco very", + "Ġdevelopment al", + "Ġparad igm", + "Ġcha 
otic", + "ĠT su", + "Ġ3 33", + "b ons", + "Ġbacter ial", + "Ġcomm its", + "Ġcos mic", + "Ġme ga", + "oc ative", + "ĠP aint", + "ophob ic", + "Ġv ain", + "Ġcar ved", + "ĠTh ief", + "ĠG ul", + "ows hip", + "Ġc ites", + "ĠEd inburgh", + "Ġdimin ished", + "Ġacknowled ges", + "ĠK ills", + "Ġmic row", + "ĠHer a", + "Ġsen iors", + "Ġwhere by", + "H op", + "at ron", + "Ġun available", + "ĠN ate", + "Ġ4 80", + "Ġsl ated", + "ĠRe becca", + "ĠB attery", + "Ġgram mar", + "Ġhead set", + "Ġcurs or", + "Ġex cluding", + "any e", + "aunder ing", + "eb in", + "Ġfeas ible", + "ĠPub lishing", + "ĠLab s", + "ĠCl iff", + "ĠFerr ari", + "Ġp ac", + "vis ible", + "mark ed", + "pe ll", + "Ġpol ite", + "Ġstagger ing", + "ĠGal actic", + "Ġsuper st", + "Ġpar an", + "ĠOffic ers", + "ãĢ ģ", + "Ġspecific s", + "ul us", + "23 9", + "ĠP aste", + "AM P", + "ĠPan ama", + "ĠDe lete", + "angu ard", + "rest rial", + "Ġhero ic", + "ĠD y", + "ا ÙĦ", + "Ġincumb ent", + "Ġcr unch", + "t ro", + "Ġsc oop", + "Ġblog ger", + "Ġsell ers", + "ure n", + "Ġmedic ines", + "ĠC aps", + "ĠAnim ation", + "ox y", + "Ġout ward", + "Ġinqu iries", + "22 9", + "Ġpsych ologist", + "ĠS ask", + "ev il", + "Ġcontam inated", + "ãĤ ¨", + "he rence", + "Ġbrand ed", + "ĠAbd ul", + "z h", + "Ġparagraph s", + "Ġmin s", + "Ġcor related", + "er b", + "Ġimp art", + "Ġmil estone", + "ĠSol utions", + "ot le", + "Ġunder cover", + "Ġmar ched", + "ĠCharg ers", + "f ax", + "ĠSec rets", + "Ġr uth", + "we ather", + "Ġfemin ine", + "Ġsh am", + "Ġprest igious", + "igg ins", + "Ġs ung", + "hist ory", + "ett le", + "gg ie", + "Ġout dated", + "ol and", + "Ġper ceptions", + "ĠS ession", + "ĠDod gers", + "u j", + "ĠE ND", + "D oc", + "Ġdefic iency", + "Gr and", + "ĠJ oker", + "Ġretro spect", + "Ġdiagn ostic", + "Ġharm less", + "Ġro gue", + "ĠA val", + "E qu", + "Ġtrans c", + "ĠRoberts on", + "ĠDep ending", + "ĠBurn s", + "iv o", + "Ġhost ility", + "F eatures", + "ĵ ĺ", + "Ġdis comfort", + "ĠL CD", + "spec ified", + "ĠEx pect", + "3 40", + "Ġimper ative", + "ĠReg ular", + "Ch inese", + "Ġstate wide", + "Ġsy mm", + "Ġlo ops", + "Ġaut umn", + "N ick", + "Ġsh aping", + "Ġqu ot", + "Ġc herry", + "ĠCross ref", + "è¦ ļéĨĴ", + "Stand ard", + "he ed", + "ĠD ell", + "ĠViet namese", + "Ġo st", + "ĠV alkyrie", + "O A", + "Ass ad", + "Ġreb ound", + "ĠTra ffic", + "pl aces", + "æ ĺ", + "ĠB uc", + "17 2", + "Ġshel ters", + "Ġins isting", + "ĠCertain ly", + "ĠKenn eth", + "ĠT CP", + "Ġpen al", + "ĠRe play", + "he ard", + "Ġdial ect", + "iz a", + "ĠF Y", + "it cher", + "ĠD L", + "Ġspir al", + "Ġquarterback s", + "Ġh ull", + "Ġgo ogle", + "Ġto dd", + "ĠSter ling", + "ĠPl ate", + "Ġsp ying", + "mb ol", + "ĠReal m", + "ĠPro ced", + "ĠCr ash", + "Ġtermin ate", + "Ġprotest ing", + "C enter", + "gu ided", + "Ġun cover", + "Ġboy cott", + "Ġreal izes", + "s ound", + "Ġpret ending", + "ĠV as", + "19 80", + "Ġfram ed", + "Ġ13 9", + "Ġdesc ended", + "Ġrehab ilitation", + "Ġborrow ing", + "ĠB uch", + "Ġbl ur", + "R on", + "ĠFro zen", + "en za", + "Ch ief", + "ĠP oor", + "Ġtransl ates", + "M IN", + "Ġ2 12", + "J ECT", + "Ġerupt ed", + "Ġsuccess es", + "S EC", + "Ġpl ague", + "Ġg ems", + "d oms", + "Ġstret ches", + "ĠSp y", + "Ġstory telling", + "C redit", + "ĠP ush", + "Ġtra ction", + "Ġin effective", + "ĠL una", + "Ġt apes", + "Ġanaly tics", + "erc ise", + "Ġprogram mes", + "ĠCar bon", + "Ġbeh old", + "he avy", + "ĠConserv ation", + "ĠF IR", + "Ġs ack", + "ter min", + "ric ks", + "Ġhous ed", + "Ġunus ually", + "I ce", + "Ġexecut ing", + "ĠMor oc", + "ed ay", + "Ġed itions", + "Ġsm arter", + "ĠB A", + 
"Ġout law", + "Ġvan ished", + "ib a", + "AL SE", + "ĠSil va", + "23 8", + "C ould", + "Ġphilos opher", + "Ġevac uated", + "Sec ret", + "14 2", + "Ġvis as", + "ãĤ ¬", + "ĠM alt", + "ĠClear ly", + "ĠN iger", + "ĠC airo", + "ĠF ist", + "3 80", + "ĠX ML", + "aut o", + "it ant", + "Ġrein forced", + "Rec ord", + "ĠSurviv or", + "G Hz", + "Ġscrew s", + "parent s", + "Ġo ceans", + "ma res", + "Ġbra kes", + "vas ive", + "Ġhell o", + "ĠS IM", + "rim p", + "Ġo re", + "ĠArm our", + "24 7", + "Ġterr ific", + "Ġt ones", + "14 1", + "ĠMin utes", + "Ep isode", + "Ġcur ves", + "Ġinflamm atory", + "Ġbat ting", + "ĠBeaut iful", + "L ay", + "Ġunp op", + "v able", + "Ġr iots", + "ĠTact ics", + "b augh", + "ĠC ock", + "Ġorg asm", + "ĠS as", + "Ġconstruct or", + "et z", + "G ov", + "Ġant agon", + "Ġthe at", + "Ġde eds", + "ha o", + "c uts", + "ĠMc Cl", + "Ġu m", + "ĠScient ists", + "Ġgrass roots", + "ys sey", + "\"] =>", + "Ġsurf aced", + "Ġsh ades", + "Ġneighb ours", + "Ġad vertis", + "oy a", + "Ġmer ged", + "Up on", + "Ġg ad", + "Ġanticip ate", + "Any way", + "Ġsl ogan", + "Ġdis respect", + "I ran", + "ĠT B", + "act ed", + "Ġsubp oen", + "medi ately", + "OO OO", + "Ġwa iver", + "Ġvulner abilities", + "ott esville", + "ĠHuff ington", + "J osh", + "ĠD H", + "M onday", + "ĠEll en", + "K now", + "x on", + "it ems", + "22 8", + "Ġf ills", + "ĠN ike", + "Ġcum ulative", + "and als", + "I r", + "Ġ ì", + "Ġfr iction", + "ig ator", + "Ġsc ans", + "ĠVi enna", + "ld om", + "Ġperform ers", + "P rim", + "Ġb idding", + "M ur", + "Ġlean ed", + "ĠPri x", + "al ks", + "Ġ[ âĢ¦]", + "ĠTw itch", + "ĠDevelop er", + "ĠG ir", + "Ġcall back", + "Ab stract", + "Ġacc ustomed", + "Ġfreed oms", + "ĠP G", + "ur acy", + "Ġl ump", + "is man", + ",, ,,", + "19 92", + "ĠR ED", + "Ġwor m", + "M atch", + "ĠPl atinum", + "I J", + "ĠOwn er", + "Tri via", + "com pl", + "Ġnew born", + "Ġfant as", + "O wn", + "Ġ19 59", + "Ġsymp ath", + "Ġub iqu", + "Ġoutput s", + "Ġal lev", + "Ġpr ag", + "K evin", + "Ġfav ors", + "Ġbur ial", + "Ġn urt", + "so lete", + "c ache", + "Ġ15 6", + "Ġunl ocks", + "te chn", + "M aking", + "Ġcon quer", + "ad ic", + "æ ĸ", + "Ġel f", + "Ġelect orate", + "ĠKurd s", + "ĠSt ack", + "ĠSam urai", + "Ġâ ĺħ", + "Ġ{ }", + "ĠS aid", + "ĠFall out", + "Ġkind ness", + "ĠCustom s", + "ĠBou levard", + "Ġhelicop ters", + "ot ics", + "ĠVe get", + "com ment", + "Ġcritic ised", + "Ġpol ished", + "ĠRem ix", + "ĠC ultural", + "Ġrec ons", + "Ġdo i", + "at em", + "Sc reen", + "Ġbar red", + "Com ments", + "ĠGener ally", + "Ġsl ap", + "7 20", + "V ari", + "p ine", + "Ġem pt", + "Ġh ats", + "ĠPlay ing", + "l ab", + "a verage", + "form s", + "ĠC otton", + "Ġcan s", + "ĠD ON", + "ĠSom alia", + "C rypt", + "ĠIncre ases", + "E ver", + "mod ern", + "Ġsur geon", + "3 000", + "Ġrandom ized", + "================================ ================================", + "B ern", + "im pl", + "ĠC OR", + "Ġpro claim", + "th ouse", + "Ġto es", + "Ġam ple", + "Ġpres erving", + "Ġdis bel", + "gr and", + "B esides", + "Ġsil k", + "ĠPat tern", + "h m", + "Ġenter prises", + "Ġaffidav it", + "ĠAdvis ory", + "Ġadvert ised", + "ĠRel igious", + "se ctions", + "psy ch", + "ĠField s", + "aw ays", + "Ġhasht ag", + "ĠNight mare", + "Ġv ampire", + "Ġfore nsic", + "rosso ver", + "n ar", + "Ġn avy", + "Ġvac ant", + "ĠD uel", + "Ġhall way", + "Ġface book", + "ident ally", + "ĠN RA", + "Ġm att", + "Ġhur ricane", + "ĠKir by", + "ĠP uzzle", + "Ġsk irt", + "ou st", + "du llah", + "Ġanal ogy", + "in ion", + "Ġtomat oes", + "ĠN V", + "ĠPe ak", + "ĠMe yer", + "Ġappoint ments", + "Ġm asc", + 
"Ġal ley", + "re hend", + "Ġchar ities", + "Ġund o", + "Ġdest inations", + "ĠTest ing", + "\"> \"", + "c ats", + "* .", + "Ġgest ures", + "gener al", + "Le ague", + "Ġpack ets", + "ĠInspect or", + "ĠBer g", + "Ġfraud ulent", + "Ġcritic ize", + "F un", + "Ġbl aming", + "nd ra", + "Ġsl ash", + "ĠE ston", + "Ġpropos ing", + "Ġwh ales", + "Ġtherap ist", + "Ġsub set", + "Ġle isure", + "EL D", + "ĠC VE", + "ĠAct ivity", + "Ġcul min", + "sh op", + "ĠD AY", + "is cher", + "ĠAdmir al", + "ĠAtt acks", + "Ġ19 58", + "Ġmem oir", + "Ġfold ed", + "Ġsex ist", + "Ġ15 3", + "ĠL I", + "Ġread ings", + "Ġembarrass ment", + "ĠEmploy ment", + "w art", + "ch in", + "Ġcontin uation", + "l ia", + "Rec ently", + "Ġd uel", + "Ġevac uation", + "ĠKash mir", + "Ġdis position", + "ĠR ig", + "Ġbol ts", + "Ġins urers", + "4 67", + "M ex", + "Ġret aliation", + "Ġmis ery", + "Ġunre asonable", + "r aining", + "I mm", + "ĠP U", + "em er", + "Ġgen ital", + "ãĤ ³", + "ĠC andy", + "Ġon ions", + "ĠP att", + "lin er", + "Ġconced ed", + "Ġf a", + "Ġfor c", + "ĠH ernandez", + "ĠGe off", + "deb ian", + "ĠTe ams", + "Ġc ries", + "Ġhome owners", + "23 7", + "A BC", + "Ġst itch", + "Ġstat istic", + "Ġhead ers", + "ĠBi ology", + "Ġmot ors", + "ĠG EN", + "ĠL ip", + "Ġh ates", + "Ġhe el", + "S elf", + "i pl", + "ED IT", + "ort ing", + "Ġann ot", + "ĠSpe ech", + "old emort", + "ĠJ avascript", + "ĠLe Bron", + "Ġfoot print", + "Ġf n", + "Ġseiz ures", + "n as", + "h ide", + "Ġ19 54", + "ĠBe e", + "ĠDecl aration", + "ĠKat ie", + "Ġreserv ations", + "N R", + "f emale", + "Ġsatur ated", + "Ġb iblical", + "Ġtroll s", + "Dev ice", + "ph otos", + "Ġdr ums", + "ãĥīãĥ© ãĤ´ãĥ³", + "N ight", + "f ighter", + "ĠH ak", + "ri ber", + "Ġc ush", + "Ġdiscipl inary", + "ba um", + "ĠG H", + "ĠSch midt", + "ilib rium", + "Ġs ixty", + "ĠKush ner", + "ro ts", + "Ġp und", + "ĠR ac", + "Ġspr ings", + "Ġcon ve", + "Bus iness", + "F all", + "Ġqual ifications", + "Ġvers es", + "Ġnarc iss", + "ĠK oh", + "ĠW ow", + "ĠCharl ottesville", + "ed o", + "Ġinterrog ation", + "ĠW ool", + "36 5", + "B rian", + "Ġâľ ĵ", + "Ġalleg es", + "ond s", + "id ation", + "ĠJack ie", + "y u", + "Ġl akes", + "Ġworth while", + "Ġcryst als", + "ĠJud a", + "Ġcomp rehend", + "Ġfl ush", + "Ġabsor ption", + "ĠO C", + "Ġfright ened", + "ĠCh ocolate", + "Mart in", + "Ġbu ys", + "Ġbu cks", + "Ġapp ell", + "ĠChampions hips", + "Ġlist ener", + "ĠDef ensive", + "Ġc z", + "ud s", + "ĠM ate", + "Ġre play", + "Ġdecor ated", + "Ġs unk", + "ĠV IP", + "ĠAn k", + "Ġ19 5", + "aa aa", + "Nob ody", + "ĠMil k", + "ĠG ur", + "ĠM k", + "ĠS ara", + "Ġse ating", + "ĠW id", + "Tr ack", + "Ġemploy s", + "Ġgig antic", + "AP P", + "ãĤ §", + "in ventory", + "Ġtow el", + "at che", + "l asting", + "ĠT L", + "Ġlat ency", + "Ġkn e", + "B er", + "me aning", + "Ġup held", + "Ġplay ground", + "Ġm ant", + "S ide", + "Ġstere o", + "Ġnorth west", + "Ġexception ally", + "Ġr ays", + "Ġrec urring", + "D rive", + "Ġup right", + "Ġab duct", + "ĠMar athon", + "Ġgood bye", + "Ġal phabet", + "h p", + "Ġcourt room", + "ring ton", + "ot hing", + "T ag", + "Ġdiplom ats", + "Ġbar bar", + "ĠAqu a", + "18 3", + "33 33", + "Ġmat urity", + "Ġinst ability", + "ĠAp ache", + "Ġ= ==", + "Ġfast ing", + "ĠGr id", + "Mod Loader", + "Ġ15 2", + "A bs", + "ĠOper ating", + "ett i", + "Ġacqu aint", + "Don nell", + "ĠK em", + "ĠFor ge", + "Ġarm ored", + "M il", + "Ġphilos ophers", + "in vest", + "Pl ayers", + "â Ī", + "Ġmy riad", + "Ġcomr ades", + "R ot", + "Ġremember ing", + "Ġcorrespond s", + "Ġprogram mers", + "ĠLyn n", + "Ġo lig", + "Ġco herent", + "yn 
chron", + "ĠChem ical", + "Ġj ugg", + "p air", + "post s", + "E ye", + "ĠIn ner", + "Ġsem ester", + "ott est", + "ĠEmir ates", + "ric anes", + "or ously", + "m its", + "ĠW is", + "Ġd odge", + "l ocation", + "Ġf aded", + "Am azon", + "ĠPro ceed", + "ĠIN FO", + "j ournal", + "ĠTru ck", + "T en", + "Ġ2 17", + "Ġstat utes", + "m obile", + "ĠT ypes", + "Rec omm", + "b uster", + "pe x", + "Ġleg ends", + "Ġhead ache", + "f aced", + "ĠWi Fi", + "if ty", + "ĠH ER", + "Ġcirc uits", + "ER ROR", + "22 6", + "ol in", + "Ġcyl inder", + "osp ace", + "ik ers", + "P rem", + "Qu ant", + "Ġconflic ting", + "Ġslight est", + "Ġfor ged", + "ion age", + "Step hen", + "ĠK ub", + "ĠOpp ortun", + "ĠHe al", + "Ġbl o", + "Ġrul ers", + "Ġh uh", + "Ġsubmar ine", + "f y", + "ass er", + "Ġallow ance", + "ĠKas ich", + "ĠT as", + "ĠAustral ians", + "Forge ModLoader", + "ĠâĨ ij", + "ĠMat rix", + "am ins", + "Ġ12 00", + "ĠAc qu", + "23 6", + "D ocument", + "ĠBre aking", + "19 3", + "ĠSub st", + "ĠRoll er", + "ĠPro perties", + "ĠN I", + "t ier", + "Ġcr ushing", + "Ġadvoc ating", + "Further more", + "keep ers", + "Ġsex ism", + "x d", + "Ġcall er", + "ĠS ense", + "chie ve", + "ĠT F", + "Ġfuel ed", + "Ġreminis cent", + "Ġobs ess", + "ur st", + "Ġup hold", + "ĠF ans", + "het ics", + "Ġâ Ĺ", + "ĠB ath", + "Ġbe verage", + "Ġo scill", + "25 4", + "Ġpol es", + "Ġgrad ual", + "Ġex ting", + "ĠS uff", + "ĠS uddenly", + "Ġlik ing", + "Ġ19 49", + "un ciation", + "am ination", + "ĠO mar", + "ĠL V", + "ĠCon sequently", + "Ġsynt hes", + "ĠG IF", + "Ġp ains", + "Ġinteract ing", + "u ously", + "inc re", + "Ġrum or", + "ĠScient ology", + "19 7", + "ĠZ ig", + "Ġspe lling", + "ĠA SS", + "Ġexting u", + "ms on", + "Ġg h", + "Ġremark ed", + "ĠStrateg ic", + "ĠM ON", + "å ¥", + "g ae", + "ĠWH AT", + "E ric", + "ĠCamp us", + "Ġmeth ane", + "Ġimag in", + "J UST", + "ĠAl m", + "X T", + "i q", + "ĠR SS", + "Ġwrong doing", + "att a", + "Ġbig ot", + "Ġdemonstr ators", + "ĠCal vin", + "ĠV illa", + "Ġmembr ane", + "ĠAw esome", + "Ġbenef ic", + "26 8", + "Ġmagn ificent", + "ĠL ots", + "G reg", + "ĠBor is", + "Ġdetain ees", + "ĠH erman", + "Ġwhis pered", + "Ġa we", + "Prof essor", + "fund ing", + "Ġphys iological", + "ĠDest ruction", + "Ġlim b", + "Ġmanip ulated", + "Ġbub bles", + "Ġpse ud", + "Ġhyd ra", + "ĠBrist ol", + "Ġst ellar", + "ĠExp ansion", + "ĠK ell", + "ĠInterest ingly", + "Ġm ans", + "Ġdrag ging", + "Ġec ological", + "ĠF it", + "Ġg ent", + "Ġbenef ited", + "ĠHait i", + "Ġpoly g", + "ãĥ İ", + "Ġ20 30", + "Ġpro w", + "Ġrecon struction", + "Ġwas t", + "Ġpsych ic", + "ĠGree ks", + "Hand ler", + "16 2", + "ĠP ulse", + "Ġsol icit", + "Ġsy s", + "Ġinflu x", + "ĠG entle", + "per cent", + "Ġprolifer ation", + "Ġtax able", + "Ġdisreg ard", + "Ġesc aping", + "Ġg inger", + "Ġwith stand", + "Ġdevast ated", + "ĠD ew", + "ser ies", + "Ġinject ed", + "ela ide", + "Ġturn over", + "he at", + "Ļ Ĥ", + "H appy", + "ĠSil ent", + "ãĤ Ń", + "iv ism", + "Ġir rational", + "AM A", + "Ġre ef", + "r ub", + "Ġ16 2", + "Ġbank ers", + "ĠEth ics", + "v v", + "Ġcritic isms", + "K n", + "18 6", + "M ovie", + "ĠT ories", + "Ġno od", + "Ġdist ortion", + "F alse", + "od ore", + "Ġt asty", + "Res earch", + "ĠU ID", + "- )", + "Ġdivor ced", + "ĠM U", + "ĠHay es", + "ĠIs n", + "ian i", + "ĠH Q", + "Ġ\" #", + "ign ant", + "Ġtra umatic", + "ĠL ing", + "H un", + "Ġsab ot", + "on line", + "r andom", + "Ġren amed", + "ra red", + "K A", + "d ead", + "é t", + "ĠAss istance", + "Ġse af", + "++++ ++++", + "Ġse ldom", + "ĠWeb b", + "Ġbo olean", + "u let", + "Ġref rain", + "ĠDI Y", + "ru le", + 
"Ġshut ting", + "Ġutil izing", + "load ing", + "ĠPar am", + "co al", + "oot er", + "Ġattract ing", + "ĠD ol", + "Ġher s", + "ag netic", + "ĠRe ach", + "im o", + "Ġdisc arded", + "ĠP ip", + "01 5", + "ü r", + "Ġm ug", + "Im agine", + "C OL", + "Ġcurs ed", + "ĠSh ows", + "ĠCurt is", + "ĠSach s", + "spe aking", + "ĠV ista", + "ĠFram ework", + "ong o", + "Ġsub reddit", + "Ġcr us", + "ĠO val", + "R ow", + "g rowing", + "Ġinstall ment", + "Ġgl ac", + "ĠAdv ance", + "EC K", + "ĠLGBT Q", + "LE Y", + "Ġac et", + "Ġsuccess ive", + "ĠNic ole", + "Ġ19 57", + "Qu ote", + "Ġcircumst ance", + "ack ets", + "Ġ14 2", + "ort ium", + "Ġguess ed", + "ĠFr ame", + "Ġperpet rators", + "ĠAv iation", + "ĠBen ch", + "Ġhand c", + "A p", + "Ġ19 56", + "25 9", + "r and", + "Net Message", + "d in", + "urt les", + "h ig", + "ĠV III", + "ff iti", + "ĠSw ords", + "b ial", + "Ġkidn apping", + "dev ice", + "Ġb arn", + "ĠEl i", + "auc as", + "S end", + "Con structed", + "Ġ ½", + "Ġneed les", + "Ġad vertisements", + "Ġv ou", + "Ġexhib ited", + "ĠFort ress", + "As k", + "B erry", + "TY PE", + "Ġcan cers", + "ump ing", + "ĠTerrit ory", + "Ġpr ud", + "Ġn as", + "Ġathe ist", + "Ġbal ances", + "ãģ Ł", + "ĠSh awn", + "& &", + "Ġland sc", + "ĠR GB", + "Ġpet ty", + "Ġex cellence", + "Ġtransl ations", + "Ġpar cel", + "ĠChe v", + "E ast", + "ĠOut put", + "im i", + "Ġamb ient", + "ĠTh reat", + "Ġvill ains", + "Ġ5 50", + "IC A", + "Ġtall er", + "Ġle aking", + "c up", + "Ġpol ish", + "Ġinfect ious", + "ĠK C", + "Ġ@ @", + "back ground", + "Ġbureaucr acy", + "ĠS ai", + "un less", + "it ious", + "ĠSky pe", + "At l", + "ID ENT", + "00 8", + "Ġhyp ocr", + "Ġpit chers", + "Ġguess ing", + "ĠF INAL", + "Bet ween", + "Ġvill agers", + "Ġ25 2", + "f ashion", + "ĠTun is", + "Be h", + "ĠEx c", + "ĠM ID", + "28 8", + "ĠHas kell", + "19 6", + "ĠN OR", + "Ġspec s", + "Ġinv ari", + "Ġgl ut", + "ĠC ars", + "Ġimp ulse", + "Ġhon ors", + "g el", + "Ġjurisd ictions", + "ĠBund le", + "ul as", + "Calif ornia", + "ĠIncre ase", + "Ġp ear", + "Ġsing les", + "Ġc ues", + "Ġunder went", + "ĠW S", + "Ġexagger ated", + "Ġdub ious", + "Ġfl ashing", + "L OG", + ") ].", + "J ournal", + "t g", + "V an", + "ĠI stanbul", + "ĠIn sp", + "ĠFrank en", + "D raw", + "Ġsad ness", + "Ġiron ic", + "ĠF ry", + "x c", + "Ġ16 4", + "is ch", + "W ay", + "ĠProtest ant", + "h orn", + "Ġun aff", + "ĠV iv", + "ill as", + "ĠProduct ions", + "ĠH ogan", + "Ġper imeter", + "ĠS isters", + "Ġspont aneous", + "Ġdown side", + "Ġdescend ants", + "Ġor n", + "w orm", + "Japan ese", + "Ġ19 55", + "Ġ15 1", + "ĠDo ing", + "els en", + "umb les", + "Ġrad ically", + "ĠDr um", + "ĠB ach", + "Ġli abilities", + "ĠO B", + "ĠElement ary", + "Ġmem e", + "yn es", + "Ġfinger print", + "ĠGr ab", + "Ġundert ake", + "Mem bers", + "ĠRead er", + "ĠSim s", + "g od", + "Ġhypot hetical", + "s cient", + "ĠA J", + "Ġchar ism", + "Ġad missions", + "ĠMiss ile", + "tr ade", + "Ġexerc ising", + "ĠBack ground", + "W ritten", + "Ġvoc als", + "whe ther", + "Ġv i", + "ĠW inner", + "Ġl itter", + "ĠSh ooting", + "ST EM", + "ãĤ ¡", + "ĠA FL", + "Ġvari ability", + "Ġe ats", + "ĠD PS", + "b row", + "Ġeleph ants", + "Ġstr at", + "Ġ Å", + "Ġsett lers", + "Matt hew", + "Ġin advert", + "H I", + "ĠIM F", + "ĠGo al", + "Ġnerv es", + "John son", + "ey e", + "ablish ment", + "Th ursday", + "BIL ITY", + "H ad", + "am oto", + "het amine", + "ep s", + "Ġmit ochond", + "Ġcomp ressed", + "ĠTre vor", + "ĠAnim als", + "T ool", + "L ock", + "Ġtwe ak", + "Ġpin ch", + "Ġcancell ation", + "P ot", + "Ġfoc al", + "ĠAst ron", + "17 3", + "ĠA SC", + "ĠO THER", + 
"umn i", + "Ġdem ise", + "d l", + "Ù ħ", + "Sem itism", + "Ġcr acking", + "Ġcollabor ative", + "Ġexpl ores", + "s ql", + "Ġher bs", + "Ġconfig urations", + "m is", + "ĠRes ult", + "ace y", + "ĠSm oke", + "Ġsan ct", + "el ia", + "Ġdeg ener", + "Ġdeep est", + "Ġscream ed", + "Ġn ap", + "Soft ware", + "ĠST AR", + "E F", + "ĠX in", + "spons ored", + "mans hip", + "23 3", + "Ġprim aries", + "Ġfilter ing", + "Ġas semble", + "m il", + "ĠMy ers", + "b ows", + "Ġpun ched", + "M ic", + "Ġinnov ations", + "Ġfun c", + "and o", + "Ġfr acking", + "ĠV ul", + "о Ð", + "osh op", + "ĠIm mun", + "Ġsett ling", + "Ġadolesc ents", + "Ġreb uilding", + "Ġtransform ing", + "Ġpar ole", + "Ġhar bor", + "Ġbook ing", + "ot ional", + "onge vity", + "ĠY o", + "b ug", + "Ġemer ges", + "ĠMethod s", + "ĠCh u", + "P res", + "ĠDun geons", + "Ġtra iling", + "ĠR um", + "ĠH ugh", + "å¤ ©", + "ĠE ra", + "ĠBatt les", + "Res ults", + "ĠTr ading", + "Ġvers a", + "c ss", + "ax ies", + "he et", + "Ġgre ed", + "19 89", + "Ġgard ens", + "Ġconting ent", + "P ark", + "ĠLeaf s", + "h ook", + "ro be", + "Ġdiplom acy", + "ĠF uel", + "ĠInv asion", + "Ġupgr ading", + "M ale", + "Ġe lic", + "Ġrelent less", + "ĠCo venant", + "ap esh", + "ĠT rop", + "T y", + "pro duction", + "art y", + "Ġpun ches", + "ak o", + "cyclop edia", + "ĠR abbit", + "ĠHD MI", + "Ġ14 1", + "Ġf oil", + "Item Image", + "ĠF G", + "Ġimplement ations", + "ĠP om", + "ixt ures", + "Ġaw ait", + "Ġ3 30", + "am us", + "Ġumb rella", + "Ġfore see", + "se par", + "Ġcircum cision", + "Ġperipher al", + "S ay", + "ĠExper t", + "In c", + "Ġwithd rew", + "ĠAnd ers", + "f ried", + "Ġradio active", + "ĠOp ening", + "Ġboard ing", + "ĠN D", + "Ġover throw", + "Act iv", + "W P", + "ĠAct s", + "× Ļ", + "Ġmot ions", + "v ic", + "ĠM ighty", + "ĠDef ender", + "a er", + "Ġthank ful", + "ĠK illing", + "ĠBr is", + "mo il", + "Ġpredict ing", + "26 6", + "ch oice", + "Ġkill ers", + "Ġinc ub", + "ĠChe st", + "ather ing", + "Ġpro claimed", + "fl ower", + "oss om", + "umbled ore", + "ĠCy cling", + "ĠOccup y", + "AG ES", + "P en", + "ĠY ug", + "Ġpack aged", + "Ġheight ened", + "c ot", + "st ack", + "C ond", + "Ġst amps", + "m age", + "Ġpersu aded", + "Ġens l", + "ĠCard inal", + "Ġsol itary", + "Ġpossess ing", + "ĠC ork", + "Ġev id", + "ĠT ay", + "Ġbl ues", + "Ġextrem ism", + "Ġlun ar", + "Ġcl own", + "Te chn", + "Ġfest ivals", + "ĠPv P", + "ĠL ar", + "Ġconsequ ently", + "p resent", + "Ġsom eday", + "ç İĭ", + "ĠMet eor", + "Ġtour ing", + "c ulture", + "Ġbe aches", + "S hip", + "c ause", + "ĠFl ood", + "ãĥ ¯", + "Ġpur ity", + "th ose", + "Ġem ission", + "b olt", + "Ġch ord", + "ĠScript ure", + "L u", + "Ġ$ {", + "cre ated", + "Other s", + "25 8", + "Ġelement al", + "Ġannoy ed", + "ĠA E", + "d an", + "ĠS ag", + "Res earchers", + "Ġfair y", + "âĢĵ âĢĵ", + "======== ====", + "Sm art", + "GG GG", + "Ġskelet ons", + "Ġpup ils", + "link ed", + "Ġur gency", + "en abled", + "ĠF uck", + "Ġcoun cill", + "r ab", + "U AL", + "T I", + "Ġlif es", + "Ġconf essed", + "B ug", + "Ġharm on", + "ĠCON FIG", + "ĠNe utral", + "D ouble", + "Ġst aple", + "ĠSH A", + "Brit ish", + "ĠSN P", + "AT OR", + "oc o", + "Ġswing ing", + "ge x", + "ole on", + "pl ain", + "ĠMiss ing", + "ĠTro phy", + "v ari", + "ran ch", + "Ġ3 01", + "4 40", + "00000000 00000000", + "Ġrest oring", + "Ġha ul", + "uc ing", + "ner g", + "Ġfut ures", + "Ġstrateg ist", + "quest ion", + "Ġlater al", + "ĠB ard", + "Ġs or", + "ĠRhod es", + "ĠD owntown", + "????? 
-", + "ĠL it", + "ĠB ened", + "Ġco il", + "st reet", + "ĠPort al", + "FI LE", + "ĠG ru", + "* ,", + "23 1", + "ne um", + "Ġsuck ed", + "Ġr apper", + "Ġtend encies", + "ĠLaure n", + "cell aneous", + "26 7", + "Ġbrow se", + "Ġover c", + "head er", + "o ise", + "Ġbe et", + "ĠG le", + "St ay", + "Ġm um", + "Ġtyp ed", + "Ġdiscount s", + "T alk", + "ĠO g", + "ex isting", + "ĠS ell", + "u ph", + "C I", + "ĠAust rian", + "ĠW arm", + "Ġdismiss al", + "Ġaver ages", + "c amera", + "Ġalleg iance", + "L AN", + "=\" #", + "Ġcomment ators", + "ĠSet ting", + "ĠMid west", + "Ġpharm ac", + "ĠEX P", + "Ġstain less", + "Ch icago", + "Ġt an", + "24 4", + "Ġcountry side", + "ĠV ac", + "29 5", + "Ġpin ned", + "Ġcr ises", + "Ġstandard ized", + "T ask", + "ĠJ ail", + "ĠD ocker", + "col ored", + "f orth", + "\" },", + "Ġpat rons", + "Ġsp ice", + "Ġm ourn", + "ĠM ood", + "Ġlaund ry", + "Ġequ ip", + "ĠM ole", + "y ll", + "ĠTH C", + "n ation", + "ĠSher lock", + "Ġiss u", + "ĠK re", + "ĠAmeric as", + "ĠA AA", + "Ġsystem atically", + "Ġcont ra", + "ĠS ally", + "Ġrational e", + "Ġcar riage", + "Ġpe aks", + "Ġcontrad iction", + "ens ation", + "ĠFail ure", + "Ġpro ps", + "Ġnames pace", + "Ġc ove", + "field s", + "ãĤ ĭ", + "Ġw ool", + "ĠC atch", + "Ġpresum ed", + "ĠD iana", + "r agon", + "ig i", + "Ġh amm", + "Ġst unt", + "ĠG UI", + "ĠObserv atory", + "ĠSh ore", + "Ġsmell s", + "ann ah", + "Ġcock pit", + "ĠD uterte", + "8 50", + "Ġopp ressed", + "bre aker", + "ĠCont ribut", + "ĠPer u", + "ĠMons anto", + "ĠAtt empt", + "Ġcommand ing", + "Ġfr idge", + "ĠR in", + "ĠChe ss", + "ual ity", + "Ġo l", + "Republic an", + "ĠGl ory", + "ĠW IN", + ".... ...", + "ag ent", + "read ing", + "Ġin h", + "J ones", + "Ġcl icks", + "al an", + "Ġ[ ];", + "ĠMaj esty", + "ĠC ed", + "op us", + "ate l", + "à ª", + "AR C", + "ĠEc uador", + "ãĥ ł", + "ĠK uro", + "Ġritual s", + "Ġcapt ive", + "Ġoun ce", + "Ġdisag reement", + "Ġsl og", + "f uel", + "P et", + "M ail", + "Ġexerc ised", + "Ġsol ic", + "Ġrain fall", + "Ġdev otion", + "ĠAss essment", + "Ġrob otic", + "opt ions", + "ĠR P", + "ĠFam ilies", + "ĠFl ames", + "Ġassign ments", + "00 7", + "aked own", + "Ġvoc abulary", + "Re illy", + "Ġc aval", + "g ars", + "Ġsupp ressed", + "ĠS ET", + "ĠJohn s", + "Ġwar p", + "bro ken", + "Ġstat ues", + "Ġadvoc ated", + "Ġ2 75", + "Ġper il", + "om orph", + "ĠF emin", + "per fect", + "Ġh atch", + "L ib", + "5 12", + "Ġlif elong", + "3 13", + "Ġche eks", + "Ġnum bered", + "ĠM ug", + "B ody", + "ra vel", + "We ight", + "ĠJ ak", + "ĠHe ath", + "Ġkiss ing", + "ĠJ UST", + "Ġw aving", + "u pload", + "Ġins ider", + "ĠPro gressive", + "ĠFil ter", + "tt a", + "ĠBe am", + "Ġviol ently", + "ip ation", + "Ġskept icism", + "Ġ19 18", + "ĠAnn ie", + "ĠS I", + "Ġgen etics", + "Ġon board", + "at l", + "ĠFried man", + "ĠB ri", + "cept ive", + "Ġpir ate", + "ĠRep orter", + "27 8", + "Ġmyth ology", + "Ġe clipse", + "Ġsk ins", + "Ġgly ph", + "ing ham", + "F iles", + "C our", + "w omen", + "Ġreg imes", + "Ġphotograp hed", + "K at", + "ĠMA X", + "Offic ials", + "Ġunexpected ly", + "Ġimpress ions", + "F ront", + ";;;; ;;;;", + "Ġsuprem acy", + "Ġs ang", + "Ġaggrav ated", + "Ġabrupt ly", + "ĠS ector", + "Ġexc uses", + "Ġcost ing", + "ide press", + "St ack", + "ĠR NA", + "ob il", + "Ġghost s", + "ld on", + "at ibility", + "Top ics", + "Ġreim burse", + "ĠH M", + "ĠDe g", + "Ġth ief", + "y et", + "ogen esis", + "le aning", + "ĠK ol", + "ĠB asketball", + "Ġf i", + "ĠSee ing", + "Ġrecy cling", + "Ġ[ -", + "Cong ress", + "Ġlect ures", + "P sy", + "Ġne p", + "Ġm aid", + "Ġori ented", + "A X", + 
"Ġrespect ful", + "re ne", + "fl ush", + "ĠUn loaded", + "re quest", + "gr id", + "ĠAltern atively", + "ĠHug o", + "Ġdec ree", + "ĠBuddh ism", + "and um", + "And roid", + "ĠCong o", + "ĠJoy ce", + "Ġacknowled ging", + "hes ive", + "ĠTom orrow", + "ĠH iro", + "th ren", + "ĠM aced", + "Ġho ax", + "ĠIncre ased", + "ĠPr adesh", + "W ild", + "____ __", + "16 1", + "Ġa unt", + "Ġdistribut ing", + "ĠT ucker", + "ĠSS L", + "ĠW olves", + "B uilding", + "ou lt", + "ĠLu o", + "ĠY as", + "ĠSp ir", + "ĠSh ape", + "ĠCamb od", + "ĠIP v", + "Ġm l", + "Ġext rad", + "39 0", + "ĠPenn y", + "d ream", + "Ġstation ed", + "opt ional", + "ew orthy", + ". ", + "ĠWorks hop", + "ĠRet ail", + "ĠAv atar", + "6 25", + "N a", + "ĠV C", + "ĠSec ure", + "M Y", + "19 88", + "oss ip", + "Ġpro state", + "Ġund en", + "Ġg amer", + "ĠCont ents", + "ĠWar hammer", + "ĠSent inel", + "3 10", + "Ġse gregation", + "ĠF lex", + "ĠM AY", + "Ġdr ills", + "ĠDrug s", + "Islam ic", + "Ġsp ur", + "Ġca fe", + "Ġimag inary", + "Ġgu iding", + "Ġsw ings", + "ĠThe me", + "ob y", + "Ġn ud", + "Ġbe gging", + "Ġstr ongh", + "Ġreject ing", + "Ġpedest rians", + "ĠPro spect", + "R are", + "s le", + "Ġconcess ions", + "ĠConst itutional", + "Ġbe ams", + "Ġfib ers", + "p oon", + "Ġinstinct s", + "pro perty", + "ĠB IG", + "Sand ers", + "im ates", + "Ġco ating", + "Ġcorps es", + "ĠTR UE", + "check ed", + "Ġ16 6", + "A sh", + "ĠJ S", + "ĠF iction", + "Ġcommun al", + "Ġener getic", + "oooo oooo", + "Ġnow adays", + "IL D", + "ib o", + "ĠSU V", + "R en", + "Ġdwell ing", + "Sil ver", + "Ġt ally", + "ĠM oving", + "Ġcow ard", + "Ġgener als", + "Ġhorn s", + "Ġcirc ulated", + "Ġrob bed", + "ĠUn limited", + "Ġharass ed", + "Ġinhib it", + "Ġcomp oser", + "ĠSpot ify", + "Ġspread s", + "3 64", + "Ġsu icidal", + "Ġno ises", + "ĠSt ur", + "Ġs aga", + "ĠK ag", + "is o", + "Ġtheoret ically", + "M oney", + "Ġsimilar ity", + "Ġslic ed", + "ut ils", + "ing es", + "\" -", + "Ġan th", + "Ġimp ed", + "Mod ule", + "Through out", + "Ġmen us", + "comm ittee", + "and i", + "ob j", + "in av", + "f ired", + "ĠAb dullah", + "Ġund ead", + "Ġfont s", + "H old", + "EN G", + "Ġsustain ability", + "Ġfl ick", + "Ġr azor", + "ĠF est", + "ĠChar acters", + "Ġword ing", + "Ġpopul ist", + "Ġcritic izing", + "Ġm use", + "v ine", + "Ġcard board", + "Ġkind ly", + "Ġfr inge", + "ĠThe ft", + "icult ural", + "Ġgovern ors", + "Ġ ����", + "Ġ16 3", + "Ġtime out", + "ĠA uth", + "Child ren", + "A U", + "Ġred emption", + "ĠAl ger", + "Ġ19 14", + "Ġw aved", + "Ġastron auts", + "og rams", + "Ġsw amp", + "ĠFinn ish", + "Ġcand le", + "Ġton nes", + "ut m", + "Ġr ay", + "Ġsp un", + "Ġfear ful", + "art icles", + "Ġca us", + "or ically", + "ĠRequ ires", + "ĠG ol", + "Ġpop e", + "Ġinaug ural", + "Ġg le", + "AD A", + "ĠIS IL", + "ĠOff ensive", + "Ġwatch dog", + "Ġbal con", + "ent ity", + "ĠH oo", + "Ġgall on", + "AC C", + "Ġdoub ling", + "Ġimpl ication", + "ĠS ight", + "Ġdoct r", + "---- ---", + "Ġ\\ \\", + "Ġm alt", + "R oll", + "Ġâī ¥", + "Ġrec ap", + "add ing", + "u ces", + "ĠB end", + "fig ure", + "Ġtur key", + "Ġsoc ietal", + "ĠT ickets", + "Ġcommer cially", + "Ġsp icy", + "Ġ2 16", + "ĠR amp", + "Ġsuperior ity", + "à ¯", + "ĠTr acker", + "C arl", + "ĠC oy", + "ĠPatri ot", + "Ġconsult ed", + "Ġlist ings", + "Ġsle w", + "reens hot", + "ĠG one", + "Ġ[ ...]", + "30 9", + "Ġh ottest", + "Ø ±", + "Ġrock y", + "ĠD iaz", + "Ġmass age", + "Ġpar aly", + "Ġp ony", + "A z", + "Ġcart ridge", + "ĠN Z", + "Ġsn ack", + "ĠLam ar", + "ple ment", + "ĠLes lie", + "Ġm ater", + "Ġsn ipp", + "24 6", + "Ġjoint ly", + "ĠBris bane", + "ĠiP 
od", + "Ġpump ing", + "Ġgo at", + "ĠSh aron", + "eal ing", + "Ġcor on", + "Ġan omal", + "rah im", + "ĠConnect ion", + "Ġsculpt ure", + "Ġsched uling", + "ĠD addy", + "at hing", + "Ġeyeb rows", + "Ġcur ved", + "Ġsent iments", + "Ġdraft ing", + "D rop", + "( [", + "Ġnom inal", + "ĠLeaders hip", + "ĠG row", + "Ġ17 6", + "Ġconstruct ive", + "iv ation", + "Ġcorrupt ed", + "ger ald", + "ĠC ros", + "ĠChe ster", + "ĠL ap", + "ãģ ª", + "OT H", + "D ATA", + "Ġal mond", + "pro bably", + "I mp", + "Ġfe ast", + "ĠWar craft", + "F lor", + "Ġcheck point", + "Ġtrans cription", + "Ġ20 4", + "Ġtwe aks", + "Ġrel ieve", + "S cience", + "Ġperform er", + "Z one", + "Ġtur moil", + "ig ated", + "hib it", + "ĠC afe", + "the med", + "Ġflu or", + "ben ch", + "Ġde com", + "ĠU nt", + "ĠBar rett", + "ĠF acts", + "Ġt asting", + "ĠPTS D", + "ĠSe al", + "ĠJuda ism", + "ĠDynam ic", + "ĠC ors", + "V e", + "ĠM ing", + "ĠTrans form", + "v on", + "ĠDef enders", + "ĠTact ical", + "ĠV on", + "ĠUn ivers", + "Ġdist orted", + "ĠB reath", + "?' \"", + "Ġag on", + "ĠDead ly", + "Ġl an", + "ĠCy cle", + "orn ed", + "Ġrel iably", + "Ġgl or", + "ĠMon key", + "ãĥ ¡", + "Ġad ren", + "Ġmicrow ave", + "ĠAl ban", + "irc raft", + "dig it", + "sm art", + "ĠD read", + "¯¯¯¯¯¯¯¯ ¯¯¯¯¯¯¯¯", + "{ {", + "ĠRoc hester", + "Ġsimpl ified", + "Ġinf licted", + "Ġtake over", + "Ġyour selves", + "ad itional", + "Ġmus cular", + "K S", + "Ġing en", + "T ax", + "ĠFe ature", + "27 7", + "Ġcru c", + "Ġcr ate", + "Ġun identified", + "Ġacclaim ed", + "ĠM anga", + "ĠFr ances", + "ĠNep al", + "ĠG erald", + "ĠKu wait", + "Ġsl ain", + "ĠHe b", + "ĠG oku", + "ãģ® æ", + "28 6", + "M rs", + "ĠC ody", + "ĠSan ctuary", + "01 6", + "Ġdism ant", + "Ġdatas et", + "ĠH ond", + "b uck", + "ĠPat terson", + "Ġpal ette", + "ĠG D", + "ic ol", + "ĠL odge", + "Ġplanet ary", + "ak in", + "ĠRegist ered", + "ab we", + "ĠPeters burg", + "Ġha iled", + "ĠP iece", + "S che", + "ĠDO J", + "Ġen umer", + "18 1", + "ĠObs erver", + "ĠB old", + "f ounded", + "com merce", + "Ġexplo its", + "ĠF inding", + "UR N", + "ĠS ne", + "ĠAc id", + "ay ette", + "ĠVal ues", + "Ġdr astic", + "Ġarchitect ural", + "Ġ\" .", + "× ķ", + "ump ed", + "Ġwra pping", + "Ġwid ow", + "ĠSl ayer", + "l ace", + "on ce", + "German y", + "av oid", + "Ġtem ples", + "P AR", + "à ´", + "ĠLuc ifer", + "ĠFl ickr", + "l ov", + "for ces", + "Ġsc outing", + "Ġlou der", + "tes y", + "Ġbefore hand", + "Ä ĵ", + "ĠNe on", + "ĠW ol", + "ĠTyp ically", + "ĠPolit ico", + "-+ -+", + "Ġbuild er", + "Ġder ive", + "K ill", + "Ġp oker", + "Ġambig uous", + "Ġlif ts", + "Ġcy t", + "Ġrib s", + "ood le", + "ĠS ounds", + "h air", + "ĠSynd rome", + "t f", + "Ġproport ional", + "u id", + "Ġper taining", + "ĠKind le", + "ĠNeg ro", + "Ġreiter ated", + "ĠTon ight", + "oth s", + "ĠCorn ell", + "Ġo wing", + "Ġ20 8", + "elf are", + "oc ating", + "ĠB irds", + "Sub scribe", + "Ġess ays", + "Ġburd ens", + "Ġillust rations", + "ar ious", + "ER AL", + "ĠCal cul", + "Ġx en", + "ĠLink edIn", + "ĠJ ung", + "Ġredes ign", + "Con nor", + "29 6", + "Ġrevers al", + "ĠAd elaide", + "ĠL L", + "Ġs inking", + "Ġg um", + "US H", + "c apt", + "ĠGr imm", + "Ġfoot steps", + "ĠCB D", + "isp ers", + "Ġpro se", + "Wed nesday", + "ĠM ovies", + "ed in", + "Ġoverturn ed", + "Ġcontent ious", + "US B", + "~~~~~~~~ ~~~~~~~~", + "ĠCo pper", + "Ġpoint less", + "N V", + "val ues", + "olph in", + "d ain", + "Ġdepos ited", + "ĠG W", + "Ġpreced ed", + "ĠCl a", + "ĠGo lem", + "ĠN im", + "ĠÎ ²", + "ĠEngine ers", + "m iddle", + "Ġfl att", + "oper ative", + "Ġcouncil s", + "imb abwe", + "el in", + 
"Ġstress ful", + "ĠL D", + "Ġres h", + "l ake", + "Ġwheel chair", + "ĠAltern ative", + "Ġoptim ize", + "oper ation", + "Ġpe ek", + "Ġones elf", + "ig il", + "Ġtrans itions", + "op athy", + "bl ank", + "Ġ16 9", + "17 1", + "________________________________ ________________________________", + "Ġl aundering", + "En c", + "ĠD EC", + "Ġwork outs", + "Ġsp ikes", + "Ġdin osaurs", + "Ġdiscrim inatory", + "P ool", + "R ather", + "38 5", + "R NA", + "tes ters", + "et o", + "ĠIdent ity", + "Ġve in", + "ĠBur ton", + "Ġarc ade", + "4 20", + "Ult imately", + "ĠSad ly", + "à °", + "p ill", + "Ġcub ic", + "ĠSpect rum", + "the se", + "st ates", + "Ġun official", + "h awks", + "ĠEVER Y", + "Ġrain bow", + "Ġincarcer ation", + "and ing", + "Ġsy ll", + "ĠEver ton", + "Ġ17 9", + "ĠSer bia", + "Ġ18 9", + "m eter", + "ĠMic key", + "Ġant iqu", + "Ġfact ual", + "ne ck", + "ĠN are", + "n orm", + "m ust", + "Ġhigh ways", + "Ġgl am", + "Ġdivid ing", + "ĠSquad ron", + "ĠMar tha", + "Ġbirth s", + "C over", + "//////// ////////", + "ĠW ong", + "Ph ot", + "ĠA LS", + "ri o", + "ĠNon etheless", + "ĠL emon", + "Ġ20 6", + "ĠE E", + "Ġderiv ative", + "ĠWW II", + "v ote", + "Ġthere in", + "Ġsepar ating", + "44 6", + "sy nc", + "ĠStre ets", + "Ġr att", + "Ġmunicip ality", + "ĠShort ly", + "Ġmon k", + ") ,\"", + "Ġscr ub", + "Ġoper atives", + "Ne ither", + "Pl ace", + "ĠLim it", + "F emale", + "ĠAct or", + "Char acter", + "Ġconstit uted", + "35 7", + "Ġprotest ed", + "ĠSt raw", + "ĠHe ight", + "ild a", + "ĠTy ph", + "Ġflood s", + "Ġcos metic", + "W AY", + "pert ure", + "up on", + "t ons", + "ess ing", + "ĠP ocket", + "Ġro oft", + "ĠC aucas", + "Ġant idepress", + "Ġincomp atible", + "EC D", + "Ġoper a", + "ĠCont est", + "Ġgener ators", + "l ime", + "Def ense", + "19 87", + "for um", + "Ġsav age", + "ĠHung arian", + "n z", + "Ġmet allic", + "Ġex pelled", + "Ġres idency", + "Ġdress es", + "66 6", + "ĠC lement", + "f ires", + "C ategory", + "Ġge ek", + "al is", + "Ġc emetery", + "educ ated", + "Ġc rawl", + "ĠUn able", + "ĠT yson", + "ak is", + "Ġp ardon", + "ĠW ra", + "Ġstrengthen ed", + "ĠF ors", + "33 5", + "ĠH C", + "ĠM ond", + "Ġvisual s", + "ĠBeat les", + "ett lement", + "Ġ ï", + "g ro", + "Ġb ash", + "Ġpo orest", + "Ġex cel", + "Ġaspir ations", + "ĠM unicip", + "ens ible", + "Ġceremon ies", + "Ġintimid ation", + "ĠCON TR", + "be ck", + "ĠK ap", + "as u", + "Ġtradem arks", + "ĠS ew", + "ĠComp etition", + "net work", + "ĠAr ri", + "ĠT et", + "Ro aming", + "W C", + "D at", + "Ġso b", + "Ġpair ing", + "Ġoverd ose", + "SA Y", + "ab er", + "Ġrev olt", + "ĠF ah", + "act ing", + "e q", + "est ation", + "F ight", + "ĠMar ks", + "27 3", + "Ġ17 8", + "R aw", + "ãģ ĭ", + "34 9", + "bl ocks", + "Ġver ge", + "est ine", + "ĠPod esta", + "Ġinv asive", + "Ġprofound ly", + "ĠA o", + "e ach", + "Ġl est", + "inter pret", + "Ġshr inking", + "Ġerr one", + "Ġche es", + "ly s", + "ĠI vy", + "ĠDirect ory", + "Ġhint ed", + "V ICE", + "Ġcontact ing", + "ĠG ent", + "he i", + "Ġlabel ing", + "Ġmerc ury", + "ĠL ite", + "Ġexp ires", + "Ġdest abil", + "rit is", + "c u", + "Ġfeather s", + "Ġste er", + "Ġprogram med", + "ĠV ader", + "Go ing", + "ĠE lim", + "Ġy o", + "ĠMic he", + "Ġ20 3", + "Ġslee ves", + "Ġb ully", + "ĠHum ans", + "36 8", + "Ġcomp ress", + "ĠBan ner", + "AR S", + "Ġa while", + "Ġcal ib", + "Ġspons orship", + "ĠDiff iculty", + "ĠP apers", + "Ġident ifier", + "} .", + "Ġy og", + "ĠSh ia", + "Ġclean up", + "Ġvib e", + "int rodu", + "im ming", + "Austral ia", + "Ġout lines", + "ĠY outube", + "tr ain", + "ĠM akes", + "Ġde ported", + "Ġcent r", + 
"ĠD ug", + "ĠB oulder", + "ĠBuff y", + "Ġinj unction", + "ĠHar ley", + "ĠG roups", + "ĠD umbledore", + "ĠCl ara", + "Ġ\" -", + "Ġsacrific ed", + "ep h", + "Sh adow", + "ib ling", + "Ġfreel ance", + "Ġevident ly", + "ph al", + "Ġret ains", + "M ir", + "Ġfin ite", + "d ar", + "ĠC ous", + "Ġrep aired", + "Ġperiod ic", + "Ġchampions hips", + "Ġaster oid", + "bl ind", + "Ġexpress ly", + "ĠAst ros", + "Ġsc aled", + "Ġge ographical", + "ĠRap ids", + "En joy", + "Ġel astic", + "ĠMoh amed", + "Mark et", + "be gin", + "Ġdisco vers", + "Ġtele communications", + "Ġscan ner", + "Ġen large", + "Ġsh arks", + "Ġpsy chedel", + "ĠRou ge", + "Ġsnap shot", + "is ine", + "X P", + "Ġpestic ides", + "ĠL SD", + "ĠDist ribution", + "re ally", + "Ġde gradation", + "Ġdisgu ise", + "Ġbi om", + "ĠEX T", + "Ġequ ations", + "Ġhaz ards", + "ĠComp ared", + ") *", + "Ġvirt ues", + "Ġeld ers", + "Ġenh ancing", + "ĠAc ross", + "er os", + "ang ling", + "Ġcomb ust", + "ucc i", + "Ġconc ussion", + "Ġcontrace ption", + "ĠK ang", + "Ġexpress es", + "Ġa ux", + "ĠP ione", + "Ġexhib its", + "Deb ug", + "OT AL", + "ĠAl ready", + "ĠWheel er", + "Ġexp ands", + "? :", + "Ġreconc iliation", + "Ġpir ates", + "Ġpur se", + "Ġdiscour age", + "Ġspect acle", + "R ank", + "Ġwra ps", + "ĠTh ought", + "Ġimp ending", + "O pp", + "ĠAng lo", + "ĠE UR", + "Ġscrew ed", + "ret ched", + "Ġencour agement", + "mod els", + "Ġconf use", + "mm m", + "ĠVit amin", + "âĸij âĸij", + "C ru", + "Ġkn ights", + "Ġdisc ard", + "Ġb ishops", + "ĠW ear", + "ĠGar rett", + "k an", + "ãĥ Ł", + "Ġmascul ine", + "cap ital", + "ĠA us", + "Ġfat ally", + "th anks", + "ĠA U", + "ĠG ut", + "12 00", + "Ġ 00000000", + "Ġsur rog", + "ĠBI OS", + "ra its", + "ĠWat ts", + "Ġresur rection", + "ĠElect oral", + "ĠT ips", + "4 000", + "Ġnut rient", + "Ġdepict ing", + "Ġspr ink", + "Ġm uff", + "ĠL IM", + "ĠS ample", + "ps c", + "ib i", + "gener ated", + "Ġspec imens", + "Ġdiss atisf", + "Ġtail ored", + "Ġhold ings", + "ĠMonth ly", + "ĠE at", + "po ons", + "Ġne c", + "ĠC age", + "ĠLot us", + "ĠLan tern", + "Ġfront ier", + "Ġp ensions", + "Ġj oked", + "ĠHard y", + "=-=- =-=-", + "r ade", + "U ID", + "Ġr ails", + "Ġem it", + "Ġsl ate", + "Ġsm ug", + "Ġsp it", + "ĠCall s", + "ĠJac obs", + "f eat", + "ĠU E", + "Ġrest ruct", + "Ġregener ation", + "Ġenerg ies", + "ĠCon nor", + "OH N", + "ĠChe ese", + "Ġg er", + "Ġresur rect", + "man agement", + "N W", + "Ġpres ently", + "ĠBru ins", + "M ember", + "ĠM ang", + "id an", + "Ġboost ing", + "w yn", + "+ .", + "requ isite", + "ĠNY PD", + "ĠMe gan", + "ĠCond itions", + "Ġp ics", + "nes ium", + "ĠR ash", + "Ġ17 4", + "ĠD ucks", + "Ġemb ro", + "z u", + "on ian", + "rel igious", + "Ġc raz", + "ĠAC A", + "ĠZ ucker", + "EM A", + "ĠPro s", + "We apon", + "ĠKn ox", + "ĠAr duino", + "Ġst ove", + "Ġheaven s", + "ĠP urchase", + "Ġher d", + "Ġfundra iser", + "Dig ital", + "5 000", + "Ġprop onents", + "/ âĢĭ", + "Ġj elly", + "ĠVis a", + "Ġmon ks", + "Ġadvance ment", + "ĠW er", + "Ġ18 7", + "e us", + "ert ility", + "Ġfet al", + "Ġ19 36", + "L o", + "Ġout fits", + "Ġstair case", + "b omb", + "Ġcustom ized", + "cl air", + "T ree", + "Ġm apped", + "ĠConsider ing", + "ĠTor res", + "Ġmeth yl", + "Ġapprox imate", + "Ġdo om", + "ĠHans en", + "Ġc rossover", + "Ġstand alone", + "ä ¼", + "Ġinv ites", + "Ġgra veyard", + "Ġh p", + "Donald Trump", + "Ġesc ort", + "G ar", + "Ġpredec essors", + "Ġh ay", + "Ġen zyme", + "ĠStra ight", + "vis ors", + "I ng", + "ane ously", + "ĠApp lied", + "Ġf ec", + "ĠDur ant", + "Ġout spoken", + "or b", + "Ġz eal", + "Ġdisgr ace", + "' ).", + "ĠChe 
ng", + "28 9", + "ĠRen a", + "ĠSu icide", + "29 4", + "Ġout raged", + "ĠNew man", + "ĠN vidia", + "ĠA ber", + "ĠB ers", + "Ġrecre ation", + "Wind ow", + "ĠD P", + "x e", + "Ġped oph", + "Ġfall out", + "ambo o", + "Ġpresent ations", + "ĠApp s", + "Ġh tml", + "3 45", + "ĠX XX", + "Ġrub bing", + "ĠLe ather", + "Ġhum idity", + "se ys", + "est ablished", + "ĠUn its", + "64 6", + "Ġrespect able", + "A uto", + "Ġthri ving", + "ĠInn ovation", + "ang s", + "Ext ra", + "reg ulation", + "29 8", + "p ick", + "Ex amples", + "ĠC J", + "Att ack", + "Ġdr acon", + "L T", + "Ġstick er", + "re rs", + "Ġsun ny", + "I ss", + "reg ulated", + "d im", + "ĠAb stract", + "Ġhus bands", + "Off ice", + "om ination", + "it ars", + "AN GE", + "asc al", + "ĠK ris", + "ĠInf antry", + "Ġm alf", + "ĠA the", + "ĠR ally", + "bal anced", + "................ ........", + "OU P", + "Ġmole cule", + "met ics", + "ĠSpl it", + "ĠInstruct ions", + "ĠN ights", + "c ards", + "Ġt ug", + "Ġcon e", + "å Ń", + "Ġt x", + "ĠDisc ussion", + "Ġcatast rophe", + "pp e", + "g io", + "Ġcommun ism", + "Ġhal ted", + "ĠGu ant", + "cle an", + "ĠSc hed", + "ĠK anye", + "Ġw ander", + "ĠSer iously", + "Ġ18 8", + "enn ial", + "f ollow", + "product ive", + "ĠFl ow", + "ĠS ail", + "Ġc raw", + "Ġsim ulations", + "or u", + "ang les", + "ĠN olan", + "Ġmen stru", + "4 70", + "Ġ20 7", + "aj a", + "Ġcas ually", + "board ing", + "Ġ2 22", + "ov y", + "ĠN umbers", + "um at", + "O E", + "28 7", + "ĠCle mson", + "Ġcert s", + "Ġsl id", + "ĠT ribe", + "Ġto ast", + "Ġfort unes", + "Ġf als", + "ĠComm ittees", + "Ġg p", + "Ġf iery", + "ĠN ets", + "ĠAn ime", + "Pack age", + "ĠComp are", + "l aughter", + "in fect", + "Ġatroc ities", + "Ġjust ices", + "Ġins ults", + "ĠVern on", + "Ġsh aken", + "Ġperson a", + "est amp", + "36 7", + "br ain", + "Ġexperiment ing", + "K en", + "ĠElect ronics", + "Ġ16 1", + "dom ain", + "Ġgraph ical", + "b ishop", + "Ġwho pping", + "ĠEv angel", + "Ġadvertis ers", + "ĠSpe ar", + "Ġb ids", + "Ġdestro ys", + "ut z", + "Ġunders c", + "ĠAD D", + "Ġan ts", + "ĠC um", + "ipp les", + "ĠF ill", + "Ġgl anced", + "Ġind icted", + "ĠE ff", + "Ġmis con", + "ĠDes ktop", + "Ġab ide", + "ãĥ Ģ", + "ĠI o", + "ĠC oul", + "Ġcaps ule", + "ĠCh rys", + "M ON", + "Ġund es", + "ĠI RA", + "Ġc itation", + "Ġdict ate", + "ĠNet works", + "ĠConf lict", + "ĠSt uff", + "x a", + "is ec", + "ĠChem istry", + "Ġquarter ly", + "William s", + "an an", + "O pt", + "ĠAlexand ria", + "out heastern", + "ĠSpring field", + "ĠBlack s", + "Ġge ography", + "24 2", + "Ġut most", + "ĠEx xon", + "ab outs", + "E VA", + "ĠEn able", + "ĠBar r", + "Ġdisag reed", + "ĠCy prus", + "Ġdement ia", + "Ġlab s", + "Ġubiqu itous", + "ĠLO VE", + "Ġconsolid ated", + "s r", + "Ġcream y", + "ĠTim ber", + "Reg ardless", + "ĠCert ificate", + "Ġ\" ...", + "ogen ous", + "Capt ain", + "Ġinsult ing", + "ĠSor os", + "ĠInst r", + "ĠBulgar ia", + "bet ter", + "Ġsuck ing", + "ĠDavid son", + "at z", + "Ġcoll ateral", + "g if", + "Ġplag ued", + "ĠC ancel", + "ĠGard ner", + "R B", + "Ġsix teen", + "Rem ove", + "ur istic", + "c ook", + "R od", + "Ġcompr ising", + "f le", + ") âĢĶ", + "ĠVik ing", + "g rowth", + "agon al", + "Ġsr f", + "af ety", + "m ot", + "N early", + "st own", + "ĠF actor", + "Ġautom obile", + "Ġproced ural", + "m ask", + "amp ires", + "Ġdisapp ears", + "j ab", + "3 15", + "Ġ19 51", + "ne eded", + "Ġd aring", + "le ader", + "Ġp odium", + "Ġun healthy", + "Ġm und", + "Ġpy ramid", + "oc re", + "Ġkiss ed", + "Ġdream ed", + "ĠFant astic", + "ĠG ly", + "å Ĭ", + "Ġgreat ness", + "Ġsp ices", + "Ġmet ropolitan", + 
"Ġcomp uls", + "i ets", + "101 6", + "ĠSh am", + "ĠP yr", + "fl ies", + "ĠMid night", + "Ġswall owed", + "Ġgen res", + "ĠL ucky", + "ĠRew ards", + "Ġdisp atch", + "ĠI PA", + "ĠApp ly", + "Ġa ven", + "al ities", + "3 12", + "th ings", + "Ġ( ).", + "Ġm ates", + "ĠS z", + "ĠC OP", + "ol ate", + "O FF", + "Ġre charge", + "c aps", + "ĠYork er", + "ic one", + "Ġgal axies", + "ile aks", + "D ave", + "ĠP uzz", + "ĠCelt ic", + "ĠA FC", + "27 6", + "ĠS ons", + "Ġaffirm ative", + "H or", + "Ġtutorial s", + "ĠC ITY", + "ĠR osa", + "ĠExt ension", + "Ser ies", + "Ġf ats", + "Ġr ab", + "l is", + "Ġun ic", + "Ġe ve", + "ĠSp in", + "Ġadul thood", + "ty p", + "Ġsect arian", + "Ġcheck out", + "ĠCy cl", + "S ingle", + "Ġmart yr", + "Ġch illing", + "88 8", + "ou fl", + "Ġ] ;", + "Ġcongest ion", + "m k", + "ĠWhere as", + "Ġ19 38", + "ur rencies", + "er ion", + "Ġbo ast", + "ĠPat ients", + "Ġch ap", + "ĠB D", + "real DonaldTrump", + "Ġexam ines", + "h ov", + "Ġstart ling", + "ĠBab ylon", + "w id", + "om ew", + "br ance", + "ĠOd yssey", + "w ig", + "Ġtor ch", + "ĠV ox", + "ĠMo z", + "ĠT roll", + "ĠAn s", + "Similar ly", + "ĠF ul", + "00 6", + "Un less", + "ĠAl one", + "st ead", + "ĠPub lisher", + "r ights", + "t u", + "ĠDoes n", + "Ġprofession ally", + "Ġcl o", + "ic z", + "Ġste als", + "Ġ á", + "19 86", + "Ġst urdy", + "ĠJoh ann", + "Ġmed als", + "Ġfil ings", + "ĠFr aser", + "d one", + "Ġmult inational", + "Ġf eder", + "Ġworth less", + "Ġp est", + "Yes terday", + "ank ind", + "Ġg ays", + "Ġb orne", + "ĠP OS", + "Pict ure", + "Ġpercent ages", + "25 1", + "r ame", + "Ġpot ions", + "AM D", + "ĠLeban ese", + "Ġr ang", + "ĠL SU", + "ong s", + "Ġpen insula", + "ĠCl ause", + "AL K", + "oh a", + "ĠMac Book", + "Ġunanim ous", + "Ġl enders", + "Ġhang s", + "Ġfranch ises", + "ore rs", + "ĠUp dates", + "Ġisol ate", + "and ro", + "S oon", + "Ġdisrupt ive", + "ĠSur ve", + "Ġst itches", + "ĠSc orp", + "ĠDomin ion", + "Ġsupp lying", + "Ar g", + "Ġtur ret", + "ĠL uk", + "Ġbr ackets", + "* )", + "ĠRevolution ary", + "ĠHon est", + "Ġnot icing", + "ĠSh annon", + "Ġafford ed", + "Ġth a", + "ĠJan et", + "! 
--", + "ĠNare ndra", + "ĠPl ot", + "H ol", + "se ver", + "e enth", + "Ġobst ruction", + "Ġ10 24", + "st aff", + "j as", + "or get", + "sc enes", + "l aughs", + "ĠF argo", + "cr ime", + "Ġorche str", + "Ġde let", + "ili ary", + "rie ved", + "Ġmilit ar", + "ĠGreen e", + "âĹ ı", + "ãģ ¦", + "ĠGu ards", + "Ġunle ashed", + "ĠWe ber", + "Ġadjust able", + "Ġcal iber", + "Ġmotiv ations", + "Ġà ł", + "m Ah", + "ĠL anka", + "hand le", + "Ġp ent", + "ĠR av", + "ĠAng ular", + "ĠK au", + "umb ing", + "Ġphil anthrop", + "Ġde hyd", + "Ġtox icity", + "e er", + "ĠY ORK", + "w itz", + "å ¼", + "ĠI E", + "commun ity", + "ĠA H", + "Ġret ali", + "Ġmass ively", + "ĠDani els", + "ĠD EL", + "Ġcar cin", + "Ur l", + "Ġrout ing", + "ĠNPC s", + "ĠR AF", + "ry ce", + "Ġwa ived", + "ĠGu atem", + "Every body", + "Ġco venant", + "Ġ17 3", + "Ġrelax ing", + "Ġqu art", + "al most", + "Ġguard ed", + "ĠSold iers", + "ĠPL AY", + "Ġout going", + "L AND", + "Ġre write", + "ĠM OV", + "ĠIm per", + "ĠS olution", + "Ġphenomen al", + "Ġl ongevity", + "Ġimp at", + "ĠN issan", + "ir ie", + "Ġod or", + "ĠZ ar", + "ok s", + "Ġmilit ias", + "ĠSP EC", + "Ġtoler ated", + "ars er", + "ĠBrad ford", + "+ ,", + "Ġsur real", + "s f", + "Can adian", + "Ġresemb lance", + "Ġcarbohyd rate", + "VI EW", + "Ġaccess ory", + "me al", + "larg est", + "ieg el", + "Some one", + "Ġtoug hest", + "os o", + "Ġfun nel", + "Ġcondemn ation", + "lu ent", + "Ġw ired", + "ĠSun set", + "Jes us", + "ĠP ST", + "ĠP ages", + "ĠTy coon", + "ĠP F", + "Ġselect ions", + "Ġ à¤", + "part isan", + "Ġhigh s", + "ĠR une", + "Ġcraft s", + "le ad", + "ĠParent s", + "Ġre claim", + "ek er", + "ĠAll ied", + "ae per", + "Ġlo oming", + "Ġbenefic iaries", + "ĠH ull", + "Stud ents", + "Jew ish", + "d j", + "Ġp act", + "tem plate", + "ĠOffic ials", + "ĠBay lor", + "Ġhe mp", + "Ġyouth s", + "ĠLevel s", + "ĠX iao", + "ĠC hes", + "Ġende avor", + "ĠRem oved", + "Ġhipp ocamp", + "H ell", + "ãĤ Ĭ", + "80 5", + "Ġd inosaur", + "ĠWr ath", + "ĠIndones ian", + "Ġcalcul ator", + "ĠD ictionary", + "Ġ4 20", + "ĠM AG", + "( _", + "! ,", + "t arians", + "Ġrestrict ing", + "rac use", + "Ġweek day", + "OU NT", + "Ġsh rugged", + "leg round", + "Ġb ald", + "ĠDo ctors", + "Ġt outed", + "ĠMax well", + "Ġ2 14", + "Ġdiplom at", + "Ġrep ression", + "Ġconstitu ency", + "v ice", + "r anked", + "ĠNap oleon", + "g ang", + "ĠFore ver", + "t un", + "Ġbul b", + "ĠPD T", + "ĠC isco", + "V EN", + "Ġres umed", + "Ste ven", + "ĠManit oba", + "Ġfab ulous", + "ĠAg ents", + "19 84", + "Ġam using", + "ĠMyster ies", + "Ġor thodox", + "fl oor", + "Ġquestion naire", + "Ġpenet rate", + "Ġfilm makers", + "ĠUn c", + "Ġst amped", + "Ġth irteen", + "Ġout field", + "Ġforward ed", + "Ġapp ra", + "Ġa ided", + "t ry", + "Ġunf ocused", + "ĠL iz", + "ĠWend y", + "ĠSc ene", + "Ch arg", + "Ġreject s", + "Ġleft ist", + "ĠProv idence", + "ĠBr id", + "reg n", + "Ġprophe cy", + "ĠL IVE", + "4 99", + "Ġfor ge", + "ĠF ML", + "Ġintrins ic", + "ĠF rog", + "Ġw ont", + "ĠH olt", + "Ġfam ed", + "CL US", + "aeper nick", + "ĠH ate", + "ĠC ay", + "Ġregister ing", + "ort ality", + "rop y", + "ocaly ptic", + "a an", + "n av", + "Ġfasc ist", + "IF IED", + "Ġimpl icated", + "ĠRes ort", + "ĠChand ler", + "ĠBr ick", + "P in", + "ys c", + "Us age", + "ĠHel m", + "us ra", + "âĺħ âĺħ", + "ĠAb bas", + "Ġunanim ously", + "Ġke eper", + "Ġadd icted", + "?? 
?", + "Ġhelm ets", + "Ġant ioxid", + "aps ed", + "80 8", + "gi ene", + "Ġwa its", + "Ġmin ion", + "ra ved", + "ĠP orsche", + "Ġdream ing", + "Ġ17 1", + "ĠC ain", + "Ġun for", + "ass o", + "ĠConfig uration", + "k un", + "hard t", + "Ġn ested", + "ĠL DS", + "L ES", + "Ġt ying", + "en os", + "Ġc ue", + "ĠMar qu", + "sk irts", + "Ġclick ed", + "Ġexp iration", + "ĠAccording ly", + "ĠW C", + "Ġbless ings", + "Ġaddict ive", + "ĠN arr", + "y x", + "ĠJagu ars", + "Ġrent s", + "ĠS iber", + "Ġt ipped", + "ous se", + "ĠFitz gerald", + "Ġhier arch", + "out ine", + "Ġwa velength", + "> .", + "ch id", + "ĠProcess ing", + "/ +", + "r anking", + "E asy", + "ĠConst ruct", + "Ġt et", + "ins ured", + "H UD", + "Ġqu oting", + "Ġcommun icated", + "in x", + "Ġin mate", + "Ġerect ed", + "ĠAbs olutely", + "ĠSure ly", + "Ġun im", + "ĠThr one", + "he id", + "Ġcl aws", + "Ġsuper star", + "ĠL enn", + "ĠWh is", + "U k", + "ab ol", + "Ġsk et", + "ĠN iet", + "Ġper ks", + "Ġaff inity", + "Ġopen ings", + "phas is", + "Ġdiscrim inate", + "T ip", + "v c", + "Ġgr inding", + "ĠJenn y", + "Ġast hma", + "hol es", + "ĠHom er", + "Ġreg isters", + "ĠGl ad", + "Ġcre ations", + "Ġlith ium", + "Ġappl ause", + "unt il", + "Just ice", + "ĠTur ks", + "Ġsc andals", + "Ġb ake", + "t ank", + "M ech", + "ĠMe ans", + "ĠM aid", + "Republic ans", + "is al", + "wind ows", + "ĠSant os", + "Ġveget ation", + "33 8", + "t ri", + "Ġfl ux", + "ins ert", + "Ġclar ified", + "Ġmort g", + "ĠCh im", + "ĠT ort", + "Ġdiscl aim", + "met al", + "ĠAs ide", + "Ġindu ction", + "Ġinf l", + "Ġathe ists", + "amp h", + "Ġe ther", + "ĠV ital", + "ĠBu ilt", + "M ind", + "Ġweapon ry", + "S ET", + "Ġ18 6", + "ad min", + "g am", + "cont ract", + "af a", + "Ġderiv atives", + "Ġsn acks", + "Ġch urn", + "E conom", + "Ġca pped", + "ĠUnder standing", + "ĠH ers", + "ĠI z", + "Ġd uct", + "I ENT", + "augh ty", + "Ġâľ Ķ", + "ĠN P", + "Ġsa iling", + "In itialized", + "Ġt ed", + "Ġreact ors", + "ĠL omb", + "Ġcho ke", + "ĠW orm", + "Ġadm iration", + "Ġsw ung", + "ens ibly", + "Ġr ash", + "ĠGo als", + "ĠImport ant", + "Sh ot", + "ĠR as", + "Ġtrain ers", + "ĠB un", + "Work ing", + "Ġhar med", + "ĠPand ora", + "ĠL TE", + "Ġmush room", + "ĠCH AR", + "ĠF ee", + "ĠM oy", + "B orn", + "ol iberal", + "ĠMart ial", + "Ġgentle men", + "Ġling ering", + "Offic ial", + "Ġgra ffiti", + "ĠN ames", + "D er", + "Ġqu int", + "ist rate", + "aze era", + "ĠNOT ICE", + "ĠFlore nce", + "Ġpay able", + "Ġdep icts", + "ĠSpe cies", + "He art", + "âĶĢâĶĢâĶĢâĶĢ âĶĢâĶĢâĶĢâĶĢ", + "Ġencl osed", + "Incre ases", + "D aily", + "ĠL is", + "Ġenact ment", + "ĠB acon", + "ĠSt eele", + "dem and", + "Ġ18 3", + "Ġmouth s", + "Ġstr anded", + "Ġenhance ment", + "01 1", + "ĠWh ats", + "Ġhe aled", + "en y", + "ĠR ab", + "Ġ3 40", + "ĠLab yrinth", + "ro ach", + "ĠY osh", + "ĠCl ippers", + "Ġconcert s", + "Intern et", + "35 5", + "Ġstick ers", + "Ġter med", + "ĠAx e", + "Ġgrand parents", + "Fr ance", + "ĠCl im", + "ĠU h", + "ul ic", + "Ġthr ill", + "cent ric", + "ĠOver view", + "ĠCond uct", + "Ġsubstant ive", + "Ġ18 2", + "m ur", + "Ġstr ay", + "ĠCo ff", + "Ġrep etitive", + "ĠFor gotten", + "Ġqual ification", + "ew itness", + "ĠZ imbabwe", + "Ġsim ulated", + "ĠJ D", + "25 3", + "ĠW are", + "Ġun sc", + "T imes", + "Ġsum mons", + "Ġdis connected", + "Ġ18 4", + "ci us", + "ĠGu jar", + "od ka", + "Ġer ase", + "ĠTob acco", + "elect ed", + "Ġun cont", + "ĠShe pard", + "ĠL amp", + "Ġalert ed", + "Ġoper ative", + "arn a", + "u int", + "Ġneglig ence", + "ac ements", + "Ġsup ra", + "Ġprev ail", + "ĠSh ark", + "Ġbel ts", + "ãģ «", + "Ġt 
ighter", + "Engine ers", + "Ġin active", + "Ġexp onent", + "ĠWill ie", + "a ples", + "Ġhe ir", + "ĠH its", + "ian n", + "ĠS ays", + "Ġcurrent s", + "ĠBeng al", + "Ġar ist", + "B uffer", + "Ġbree ze", + "ĠWes ley", + "Col a", + "Ġpron oun", + "Ġde ed", + "ĠK ling", + "Ġof t", + "Ġinf lict", + "Ġpun ishing", + "Ġn m", + "ik u", + "OD UCT", + "01 4", + "Ġsubsid y", + "ĠDE A", + "ĠHer bert", + "ĠJ al", + "B ank", + "Ġdef erred", + "Ġship ment", + "B ott", + "Ġal le", + "b earing", + "HT ML", + "Off line", + "Ġ2 13", + "Ġscroll ing", + "Ġsc anned", + "ĠLib yan", + "ĠT OP", + "ch rom", + "d t", + "col umn", + "Psy NetMessage", + "Z ero", + "Ġtor so", + "0 50", + "âķ IJ", + "Ġimp erson", + "ĠSchw artz", + "ud ic", + "Ġpiss ed", + "ĠS app", + "25 7", + "ĠIS Ps", + "og l", + "Ġsuper vised", + "Ġad olescent", + "Ġatt ained", + "ĠDel ivery", + "ĠB unny", + "Ġ19 37", + "Ġmini ature", + "Ġo s", + "Ġ3 70", + "60 8", + "ĠMour inho", + "Ġinn ate", + "Ġtem po", + "ĠN M", + "ĠFall en", + "00 9", + "Ġprov ocative", + "Stream er", + "ĠBened ict", + "ĠBol she", + "Ġt urtle", + "ĠPC B", + "ĠEqu al", + "Direct or", + "ĠR end", + "Ġflu ids", + "Author ities", + "Ġcous ins", + "requ ency", + "ĠNeigh bor", + "s ets", + "sh ared", + "Char les", + "pass word", + "Ġg ears", + "Ġ2 11", + "ĠHard ware", + "ri ka", + "Ġup stream", + "H om", + "Ġdisproportion ately", + "iv ities", + "Ġund efined", + "Ġelect rons", + "Ġcommem or", + "Event ually", + "Ġ> <", + "Ġir responsible", + "2 18", + "ĠRe leased", + "ĠO VER", + "ĠI GN", + "ĠB read", + "st ellar", + "ĠS age", + "tt ed", + "dam age", + "ed ition", + "ĠPre c", + "Ġl ime", + "Ġconf inement", + "Ġcal orie", + "we apon", + "Ġdiff ering", + "ĠS ina", + "m ys", + "am d", + "Ġintric ate", + "k k", + "ĠP AT", + "ã o", + "st ones", + "lin ks", + "Ġr anch", + "Sem itic", + "Ġdifferent iate", + "ĠS inger", + "occup ied", + "Ġfort ress", + "c md", + "Ġinter ception", + "ĠAnk ara", + "Ġre pt", + "ĠSol itaire", + "Ġrem ake", + "p red", + "Ġd ared", + "aut ions", + "ĠB ACK", + "Run ning", + "Ġdebug ging", + "Ġgraph s", + "3 99", + "ĠNig el", + "Ġb un", + "Ġpill ow", + "Ġprog ressed", + "fashion ed", + "Ġob edience", + "ER N", + "Ġrehe ars", + "C ell", + "t l", + "S her", + "Ġher ald", + "ĠPay ment", + "ĠC ory", + "ĠDe pt", + "Ġrep ent", + "ĠWe ak", + "uck land", + "Ġple asing", + "Ġshort ages", + "Ġjur ors", + "ĠK ab", + "q qa", + "Ant i", + "Ġw ow", + "ĠRC MP", + "Ġt sun", + "ĠS ic", + "Ġcomp rises", + "Ġsp ies", + "Ġprec inct", + "n u", + "Ġur ges", + "Ġtim ed", + "Ġstrip es", + "ĠB oots", + "Ġy en", + "Adv anced", + "Ġdisc rete", + "ĠArch angel", + "employ ment", + "D iff", + "Ġmon uments", + "Ġ20 9", + "work er", + "Ġ19 6", + "ĠI g", + "utter stock", + "T PS", + "J ac", + "Ġhomeless ness", + "Ġcomment ator", + "Ġrac ially", + "f ing", + "se ed", + "E le", + "ell ation", + "Ġeth anol", + "Ġpar ish", + "ĠD ong", + "ĠAw akening", + "Ġdev iation", + "ĠB earing", + "ĠTsu k", + "Ġrec ess", + "Ġl ymph", + "ĠCann abis", + "å ľ", + "ĠNEW S", + "Ġd ra", + "ĠStef an", + "ĠWr ong", + "ĠS AM", + "Ġloose ly", + "Ġinterpre ter", + "ĠPl ain", + "Go vernment", + "Ġbigot ry", + "Ġgren ades", + "ave z", + "pict ured", + "Ġmand ated", + "ĠMon k", + "ĠPed ro", + "Ġl ava", + "27 4", + "Ġcyn ical", + "ĠScroll s", + "l ocks", + "M p", + "Ġcon gregation", + "orn ings", + "ph il", + "ĠI bid", + "Ġf erv", + "Ġdisapp earing", + "Ġarrog ant", + "sy n", + "ĠMa ver", + "ĠSu it", + "24 1", + "Ġab bre", + "ack ers", + "P a", + "ĠY el", + "Whe never", + "Ġ23 5", + "ĠV ine", + "ĠAn at", + "Ġext inct", + "LE T", + 
"Ġexecut able", + "V ERS", + "ox ide", + "D NA", + "ĠP rel", + "Ġresent ment", + "Ġcompr ise", + "ĠAv iv", + "Ġinter ceptions", + "Ġprol ific", + "IN A", + "ĠEr in", + "though t", + "2 19", + "ĠPsychiat ry", + "un ky", + "chem ist", + "H o", + "ĠMcC oy", + "Ġbr icks", + "L os", + "ri ly", + "ĠUS SR", + "Ġr ud", + "Ġl aud", + "ĠW ise", + "ĠEmer ald", + "Ġrev ived", + "Ġdam ned", + "ĠRep air", + "id em", + "ct ica", + "Ġpatri arch", + "ĠN urs", + "me g", + "Ġcheap est", + "re ements", + "empt y", + "ĠCele br", + "Ġdepri vation", + "ch anted", + "ĠTh umbnails", + "E nergy", + "ĠEth an", + "ĠQ ing", + "Ġopp oses", + "W IND", + "v ik", + "ĠM au", + "ĠS UB", + "66 7", + "G RE", + "ĠVol unte", + "nt on", + "C ook", + "å IJ", + "es que", + "Ġplum met", + "Ġsu ing", + "Ġpron ounce", + "Ġresist ing", + "ĠF ishing", + "ĠTri als", + "Ġy ell", + "Ġ3 10", + "Ġin duct", + "Ġpersonal ized", + "oft en", + "R eb", + "EM BER", + "Ġview point", + "Ġexist ential", + "() )", + "rem ove", + "MENT S", + "l asses", + "Ġev apor", + "Ġa isle", + "met a", + "Ġreflect ive", + "Ġentit lement", + "Ġdev ised", + "mus ic", + "asc ade", + "Ġwind ing", + "off set", + "Ġaccess ibility", + "ke red", + "Bet ter", + "ĠJohn ston", + "th inking", + "S now", + "ĠCroat ia", + "ĠAt omic", + "27 1", + "34 8", + "Ġtext book", + "ĠSix th", + "Ġ اÙĦ", + "Ġsl ider", + "ĠBur ger", + "b ol", + "S ync", + "Ġgrand children", + "Ġc erv", + "+ )", + "Ġe ternity", + "Ġtweet ing", + "Ġspec ulative", + "Ġpiv otal", + "ĠW P", + "ĠT ER", + "ynam ic", + "Ġu pl", + "ĠC ats", + "per haps", + "Ġclass mates", + "Ġblat ant", + "' -", + "Ġl akh", + "ant ine", + "ĠB org", + "i om", + "/ (", + "ĠAthlet ic", + "Ġs ar", + "OT A", + "ĠHoff man", + "Never theless", + "Ġad orable", + "Ġspawn ed", + "Ass ociated", + "ĠDom estic", + "Ġimpl ant", + "ĠLux em", + "ĠK ens", + "Ġp umps", + "ĠS AT", + "Att ributes", + "50 9", + "av our", + "Ġcentral ized", + "ĠT N", + "Ġfresh ly", + "ĠA chieve", + "Ġouts iders", + "her ty", + "ĠRe e", + "ĠT owers", + "ĠD art", + "ak able", + "Ġm p", + "ĠHeaven ly", + "Ġr ipe", + "ĠCarol ine", + "ry an", + "Ġclass ics", + "Ġret iring", + "Ġ2 28", + "Ġa h", + "Ġdeal ings", + "Ġpunch ing", + "ĠChap man", + "O ptions", + "max well", + "vol ume", + "Ġst al", + "Ġex ported", + "ĠQu ite", + "Ġnumer ical", + "B urn", + "F act", + "ĠKey stone", + "Ġtrend ing", + "Ġalter ing", + "ĠAfric ans", + "47 8", + "ĠM N", + "ĠKn ock", + "Ġtempt ation", + "Ġprest ige", + "Over view", + "ĠTrad itional", + "ĠBah rain", + "Priv ate", + "ĠH OU", + "Ġbar r", + "ĠT at", + "C ube", + "US D", + "ĠGrand e", + "ĠG at", + "ĠFl o", + "Ġres ides", + "Ġind ec", + "vol ent", + "Ġperpet ual", + "ub es", + "Ġworld view", + "ĠQuant um", + "Ġfil tered", + "Ġen su", + "orget own", + "ERS ON", + "ĠM ild", + "37 9", + "OT T", + "à ¥", + "Ġvit amins", + "Ġrib bon", + "Ġsincere ly", + "ĠH in", + "Ġeight een", + "Ġcontradict ory", + "Ġgl aring", + "Ġexpect ancy", + "Ġcons pir", + "Ġmon strous", + "Ġ3 80", + "re ci", + "Ġhand ic", + "Ġpump ed", + "Ġindic ative", + "Ġr app", + "Ġav ail", + "ĠLEG O", + "ĠMar ijuana", + "19 85", + "ert on", + "Ġtwent ieth", + "################ ################", + "ĠSw amp", + "Ġval uation", + "Ġaffili ates", + "adjust ed", + "ĠFac ility", + "26 2", + "Ġenz ymes", + "itud inal", + "Ġimp rint", + "S ite", + "Ġinstall er", + "ĠT RA", + "m ology", + "lin ear", + "ĠCollect ive", + "ig ating", + "ĠT oken", + "Ġspec ulated", + "K N", + "ĠC ly", + "or ity", + "Ġdef er", + "Ġinspect ors", + "appro ved", + "R M", + "ĠSun s", + "Ġinform ing", + "ĠSy racuse", + 
"ib li", + "7 65", + "Ġgl ove", + "Ġauthor ize", + "âĢ¦âĢ¦âĢ¦âĢ¦ âĢ¦âĢ¦âĢ¦âĢ¦", + "ĠCru ise", + "Ġcontract ing", + "she ll", + "IF E", + "ĠJew el", + "p ract", + "ĠPhot oshop", + "ĠKnow ing", + "h arm", + "Ġattract ions", + "ad an", + "et us", + "01 8", + "w agen", + "Al t", + "Ġmultip ly", + "Ġequ ilibrium", + ": {", + "ĠF ighters", + "ĠEd gar", + "Ġfour teen", + "Go vern", + "Ġmis use", + "Ġab using", + "Ġancest ry", + "ram er", + "64 4", + "Ġwor ms", + "Ġthick er", + "ĠComb ine", + "Ġpeas ants", + "Ġv ind", + "Ġcon quest", + "Ġm ocked", + "Ġc innamon", + "ĠC ald", + "ĠGall up", + "Ġavoid ance", + "Ġincarn ation", + "ĠStr at", + "Ġt asted", + "ent a", + "ĠN eal", + "p ared", + "Ġtermin ology", + "ject ion", + "Scient ists", + "ĠIN S", + "ĠDe e", + "Ġdirect ories", + "R oad", + "ĠSh ap", + "br ight", + "ĠDirect ors", + "ĠCol umn", + "Ġb ob", + "Ġprefer ably", + "Ġgl itch", + "f urt", + "Ġe g", + "id is", + "C BC", + "Ġsur rendered", + "Ġtest ament", + "33 6", + "ug gest", + "ĠN il", + "an other", + "Ġpat hetic", + "ĠDon na", + "Ġ2 18", + "ĠA very", + "Ġwhis key", + "Ġf ixture", + "ĠCon quest", + "Ġbet s", + "O cc", + "ĠLe icester", + "] .\"", + "Ġ) );", + "Ġfl ashes", + "45 6", + "Ġmask ed", + "ge bra", + "Ġcomput ed", + "che l", + "aud er", + "Ġdefe ats", + "ĠLiber ation", + "ĠOs ama", + "ĠV ive", + "Ch anges", + "Ch annel", + "Ġtar iffs", + "Ġm age", + "ĠS ax", + "Ġinadvert ently", + "ĠC RE", + "ĠRe aper", + "ink y", + "gr ading", + "Ġstere otyp", + "Ġcur l", + "ĠF ANT", + "Ġfram eworks", + "M om", + "ĠAn ch", + "Ġflav our", + "car bon", + "Ġperm itting", + "let cher", + "ĠMo zilla", + "ĠPark ing", + "ĠCh amp", + "Sc roll", + "Ġmurd erer", + "Ġrest ed", + "Ġow es", + "ĠP oss", + "AD D", + "IF F", + "res olution", + "ĠMin ing", + "Ġcompar ative", + "D im", + "Ġneighbour ing", + "ĠA ST", + "ĠT oxic", + "Ġbi ases", + "Ġgun fire", + "ur ous", + "ĠMom ent", + "19 83", + "Ġper vasive", + "tt p", + "ĠNorm ally", + "r ir", + "S arah", + "ĠAlb any", + "Ġun sett", + "ĠS MS", + "ip ers", + "l ayer", + "ĠWh ites", + "up le", + "Ġtur bo", + "ĠLe eds", + "Ġthat s", + "ĠMin er", + "M ER", + "ĠRe ign", + "Ġper me", + "ĠBl itz", + "Ġ19 34", + "Ġintimid ating", + "t ube", + "Ġecc entric", + "ab olic", + "box es", + "ĠAssoci ates", + "v otes", + "Ġsim ulate", + "um bo", + "aster y", + "Ġship ments", + "FF FF", + "an th", + "Ġseason ed", + "Ġexperiment ation", + "âĸ ł", + "law s", + "Me et", + "idd les", + "ant ics", + "R ating", + "IS IS", + "h ift", + "Ġfront s", + "b uf", + "01 7", + "Ġun att", + "ĠD il", + "le ases", + "ĠGard ens", + "77 7", + "t ouch", + "ve ll", + "45 8", + "Ġ= ====", + "s aving", + "Ġer osion", + "ĠQu in", + "Ġearn s", + "Ġaccomplish ment", + "ĠWe i", + "Ġ< [", + "____ _", + "Ġir rig", + "ĠT eddy", + "Ġconqu ered", + "ĠArm ored", + "Ġassert s", + "Ġmanip ulating", + "r é", + "Ġtranscript s", + "G allery", + "Ġplot ting", + "Ne il", + "Ġbetray al", + "load er", + "ĠS ul", + "Ġdispl acement", + "Ġroy alty", + "ĠW I", + "he it", + "ĠDev ices", + "alle l", + "Ġmunicipal ities", + "Ġcan al", + "St ars", + "ĠU AE", + "Ġ\" âĢ¦", + "ĠC U", + "ab ove", + "Ġreson ance", + "ĠguiActive Un", + "add ed", + "ĠBra ves", + "ĠI bn", + "Ġhere by", + "ĠB RE", + "Ġshare holder", + "ĠH ir", + "ĠJ i", + "Ġstrange ly", + "Ġadm ired", + "Ġpl ight", + "Ġb achelor", + "ĠP ole", + "cipl inary", + "T ony", + "ĠArmen ian", + "Ġun man", + "ĠZion ist", + "St age", + "isco ver", + "Ġautom otive", + "Ġs idelines", + "Ġsl ick", + "ĠRena issance", + "ĠF UN", + "Im ages", + "ĠH aj", + "Ġp ing", + "Ġshort cut", + "ĠBl 
vd", + "ĠLook s", + "Ġbur sts", + "Ġcl amp", + "Ġm ish", + "Ġsort ing", + "Ġpatri ot", + "Ġcorrect ness", + "ĠScand inav", + "ĠCaval iers", + "p ython", + "az ar", + "Ġ3 75", + "ĠJa une", + "40 9", + "Ġdetrim ental", + "Ġstab bing", + "Ġpoison ed", + "Ġf ountain", + "oc ent", + "or st", + "ĠMar i", + "Ġr ains", + "ĠO vers", + "ĠInst itution", + "ud get", + "AM Y", + "t ale", + "ĠK R", + "ĠPr ices", + "Ġhead aches", + "Ġlands l", + "ĠA ura", + "Bon us", + "ĠZ hao", + "ĠH ip", + "Ġhop s", + "ĠKurd istan", + "Ġexplo iting", + "ry n", + "Ġhypocr isy", + "op ening", + "Ġgun shot", + "Ġw ed", + "inter stitial", + "Inter stitial", + "Ġam en", + "Bre aking", + "Ġmarket ed", + "W ire", + "ĠC rowd", + "Contin ue", + "ĠK nown", + "ĠEffect ive", + "ore an", + "iz ons", + "Jose ph", + "Ġescal ation", + "us ername", + "Ġcur tain", + "AT ES", + "ĠP AR", + "ĠM iy", + "Ġcounter fe", + "l ene", + "Ġcont enders", + "d aily", + "ĠAs c", + "ĠPhill ip", + "most ly", + "Ġfil ename", + "he ne", + "Ġresemb ling", + "Ġst aging", + "ĠCh loe", + "Ġw iring", + "H on", + "ĠRen ew", + "ott age", + "ĠHy brid", + "m uch", + "Ġstro kes", + "Ġpolicy makers", + "AP TER", + "ĠArk ham", + "pl ot", + "Ġassist ants", + "Ġde port", + "ĠSe ga", + "Ġinflu enza", + "ĠC ursed", + "ĠK obe", + "Ġskin ny", + "Prov ider", + "ĠR ip", + "Ġincrement al", + "product s", + "B F", + "Ġd ome", + "ĠC redits", + "Ġlos ers", + "int s", + "ĠBet ty", + "ĠTal ent", + "ĠD AM", + "L v", + "E ss", + "Ġd ens", + "tem p", + "J udge", + "od ic", + "Ġ' (", + "UR ES", + "ets k", + "V O", + "Ġretrie ved", + "Ġarchitect s", + "Ù ĩ", + "Ġeth ic", + "ĠSecond ary", + "st ocks", + "ad ia", + "Ġ3 25", + "ĠOp inion", + "Ġsimultane ous", + "Ġd izz", + "ul p", + "Ġsmugg ling", + "ipp ery", + "R andom", + "f acing", + "ĠD as", + "Ġstock p", + "Ġdiscl osures", + "po inter", + "Ġcor al", + "ĠSe lection", + "ĠP ike", + "ival ent", + "Ġruth less", + "ĠR im", + "Ġensu ing", + "ĠExper iment", + "Ġcongress man", + "Ġbelie ver", + "Ġun specified", + "ĠM ord", + "Ġknowledge able", + "ĠV ERY", + "T X", + "Ġstra ps", + "Ġtur f", + "apesh ifter", + "Ġmar ital", + "Ġfl ock", + "ãģ Ĩ", + "26 3", + "AM ES", + "ĠOpp osition", + "Ġtre asures", + "ĠG OD", + "Ġmodel ed", + "ĠWOR LD", + "Ġ( [", + "ĠUs age", + "H F", + "Ġ$ (", + "uss ed", + "Ġpione er", + "E ight", + "par se", + "b read", + "rit z", + "ĠMir anda", + "ĠK ant", + "++ )", + "ore n", + "Ġprov oked", + "Ġbre eds", + "ĠIn cludes", + "ĠPast ebin", + "ĠFl ip", + "J ava", + "Ġbr ink", + "Ġrum ored", + "Ġun seen", + "Ġgar nered", + "ĠDef in", + "al ted", + "Ġtatt oos", + "Ġhes itation", + "is itions", + "ĠWe aver", + "ĠReport ing", + "Ġtherap ies", + "Ġconsult ants", + "Ġresid ual", + "ĠMal i", + "ĠRom a", + "i ago", + "ĠRes idents", + "ub i", + "Ġremed ies", + "Ġadapt ive", + "ĠAl ive", + "ĠBar cl", + "Ġwal lets", + "c rypt", + "etermin ation", + "ĠPel osi", + "Ġsl ipping", + "oton in", + "Ġall iances", + "pat rick", + "ir is", + "Ġor th", + "ĠPer kins", + "ĠDe V", + "ĠG ets", + "Ġdry ing", + "ge e", + "fore st", + "ĠFor get", + "ore m", + "33 9", + "Ġvague ly", + "ĠD ion", + "ĠP orn", + "ĠH OW", + "Ġp neum", + "Ġrub ble", + "ĠT aste", + "enc ia", + "ĠG el", + "Ġd st", + "Ġ24 5", + "ĠMoroc co", + "inf lamm", + "ĠTw ins", + "Ġb ots", + "d aughter", + "ĠB alk", + "Ġbre thren", + "Ġlog os", + "Ġgo bl", + "f ps", + "Ġsub division", + "Ġp awn", + "Ġsquee zed", + "Ġmor ale", + "ĠD W", + "' \"", + "Ġkn ot", + "ook y", + "Ġdiv isive", + "Ġboost ed", + "ch y", + "ãĥ IJ", + "if act", + "Ġnewcom ers", + "ĠWrest ling", + "Ġsc outs", + "w 
olves", + "R at", + "Ġnin eteenth", + "ĠOs borne", + "St ats", + "Ġem powered", + "Ġpsych opath", + "ĠO EM", + "ugg age", + "ĠP K", + "ĠMoh ammad", + "P ak", + "Ġanarch ists", + "ĠExt ract", + "est hes", + "ĠStock holm", + "l oo", + "ĠG raph", + "Ġdeploy ing", + "ĠStr anger", + "ĠM old", + "Ġstaff er", + "Ġdiscount ed", + "uck le", + "ple ase", + "ĠLand ing", + "ÃŃ a", + "Ġ19 3", + "Ġan te", + "Ġrep etition", + "Ġ+ /-", + "Ġpar ody", + "Ġlive ly", + "AA A", + "ĠHor us", + "Ġp its", + "ind ers", + "L OC", + "ĠVen ice", + "40 6", + "ĠDis cover", + "â Ĩ", + "ellect ual", + "Ġp ens", + "Ġey el", + "ig uous", + "Im pl", + "Ġj oking", + "Ġinv al", + "ĠBel fast", + "Ġcredit ors", + "ĠSky walker", + "ov sky", + "Ġcease fire", + "Ġse als", + "is oft", + ") ).", + "ĠFel ix", + "IT S", + "Ġt resp", + "ĠBlock chain", + "ew are", + "ĠSch war", + "en ne", + "mount ed", + "ĠBe acon", + "les h", + "Ġimmense ly", + "Ġche ering", + "Em ploy", + "sc ene", + "ish ly", + "atche wan", + "ĠNic olas", + "Ġdr ained", + "ĠEx it", + "ĠAz erb", + "j un", + "Ġflo ated", + "u ania", + "De ep", + "Ġsuper v", + "Ġmyst ical", + "ĠD ollar", + "ĠApost le", + "ĠR EL", + "ĠProv ided", + "ĠB ucks", + "ãĥ ´", + "cut ting", + "Ġenhance ments", + "ĠPengu ins", + "ĠIsa iah", + "Ġj erk", + "ĠW yn", + "Ġst alled", + "Ġcryptoc urrencies", + "ĠR oland", + "sing le", + "Ġl umin", + "ĠF ellow", + "ĠCap acity", + "ĠKaz akh", + "W N", + "Ġfin anced", + "38 9", + "Ġt id", + "Ġcoll usion", + "ĠMy r", + "î Ģ", + "Sen ator", + "Ġped iatric", + "Ġneat ly", + "Ġsandwic hes", + "ĠArchitect ure", + "Ġt ucked", + "Ġbalcon y", + "Ġearthqu akes", + "qu ire", + "F uture", + "Ġhe fty", + "é Ĺ", + "Ġspecial izes", + "Ġstress es", + "Ġs ender", + "Ġmisunder standing", + "Ġep ile", + "Ġprov oke", + "ĠCol ors", + "Ġdis may", + "uk o", + "[ _", + "58 6", + "ne utral", + "Ġdon ating", + "ĠRand all", + "Mult i", + "Ġconvenient ly", + "ĠS ung", + "ĠC oca", + "Ġt ents", + "ĠAc celer", + "Ġpart nered", + "27 2", + "ir ming", + "ĠB AS", + "s ometimes", + "Ġobject ed", + "ub ric", + "p osed", + "LC S", + "gr ass", + "Ġattribut able", + "V IS", + "Israel i", + "Ġrepe ats", + "ĠR M", + "v ag", + "ut a", + "in ous", + "Ġin ert", + "ĠMig uel", + "æ Ń", + "ĠHawai ian", + "B oard", + "Ġart ific", + "ĠAzerb ai", + "as io", + "ĠR ent", + "A IN", + "Ġappl iances", + "Ġnational ity", + "Ġass hole", + "ĠN eb", + "Ġnot ch", + "h ani", + "ĠBr ide", + "Av ailability", + "Ġintercept ed", + "Ġcontin ental", + "Ġsw elling", + "ĠPers pect", + "b ies", + ". <", + "ith metic", + "ĠL ara", + "Ġtempt ing", + "add r", + "Ġoversee ing", + "cl ad", + "ĠD V", + "ĠGing rich", + "Ġm un", + "ĠApp ropri", + "Ġalter ations", + "ĠPat reon", + "Ġha voc", + "Ġdiscipl ines", + "Ġnotor iously", + "aku ya", + "ier i", + "? 
).", + "ĠW ent", + "Ġsil icon", + "Ġtre mb", + "Cont ainer", + "K nown", + "Ġmort ar", + "est e", + "ick a", + "Ar thur", + "ĠPre viously", + "ĠMart y", + "Ġsp arse", + "g ins", + "Ġin ward", + "ĠParticip ant", + "C opy", + "ĠM isc", + "Ġantib iotic", + "ĠRet ro", + "Ġel usive", + "Ġass ail", + "ĠBatt alion", + "ĠB ought", + "Ġdimin ish", + "ĠEuro pa", + "s ession", + "ĠDanger ous", + "ies el", + "Ġdisbel ief", + "Ġbl asts", + "ext reme", + "ĠBoy d", + "ĠProject s", + "ĠGu ys", + "Ġunder gone", + "Ġgr ill", + "ĠDw ight", + "Ġ19 7", + "US ER", + "Ġfiles ystem", + "Ġcl ocks", + "T aylor", + "Ġwra pper", + "Ġfold ing", + "ous and", + "ĠPhilipp ine", + "ATION AL", + "ĠPer th", + "Ġas hes", + "Ġaccum ulate", + "ĠGate way", + "Sh op", + "orks hire", + "H an", + "ĠBar rel", + "ĠLe h", + "ĠX V", + "Ġwh im", + "Ġrep o", + "ĠC G", + "ĠM am", + "Ġincorpor ating", + "Ġbail out", + "Ġlingu istic", + "Ġdis integ", + "C LE", + "Ġcinem atic", + "ĠF iber", + "S yn", + "il ion", + "ĠCom pos", + "c hens", + "Ġne oc", + "Ġbo iled", + "F INE", + "on o", + "un cle", + "ik en", + "ĠB M", + "Î ¹", + "Ġreceipt s", + "Ġdisp osed", + "ĠTh irty", + "ĠR ough", + "ĠA BS", + "Ġnot withstanding", + "oll en", + "# $", + "Ġunrel iable", + "Ġbl oom", + "Ġmedi ocre", + "Ġtr am", + "ĠTas man", + "Ġsh akes", + "Ġmanifest o", + "ĠM W", + "Ġsatisf actory", + "Ġsh ores", + "Ġcomput ation", + "Ġassert ions", + "orm ons", + "ar ag", + "ab it", + "Dem ocrats", + "ĠL oot", + "ĠVol ks", + "ha ired", + "Ġgrav itational", + "S ing", + "ĠM iz", + "Ġthro ttle", + "Ġtyr anny", + "ĠView s", + "Ġrob ber", + "ĠMinor ity", + "Ġsh rine", + "sc ope", + "pur pose", + "Ġnucle us", + "our cing", + "ĠUS DA", + "ĠD HS", + "w ra", + "ĠBow ie", + "Sc ale", + "ĠB EL", + "x i", + "I ter", + "Ġ( ),", + "w right", + "Ġsail ors", + "ous ed", + "NAS A", + "ĠPro of", + "ĠMin eral", + "t oken", + "ĠF D", + "R ew", + "Ġe ll", + "6 30", + "Ġchance llor", + "ĠG os", + "Ġamount ed", + "ĠRec re", + "ome z", + "ĠOpt im", + "ĠOl ive", + "Ġtrack er", + "ow ler", + "ĠUn ique", + "R oot", + "Ġmar itime", + "ĠQur an", + "ĠAd apt", + "Ġecosystem s", + "ĠRe peat", + "ĠS oy", + "ĠI MP", + "Ġgrad uating", + "and em", + "P ur", + "ĠRes et", + "ĠTr ick", + "ĠPh illy", + "ĠT ue", + "ĠMalays ian", + "Ġclim ax", + "Ġb ury", + "Ġcons pic", + "ĠSouth ampton", + "ĠFl owers", + "Ġesc orted", + "ĠEduc ational", + "ĠI RC", + "Ġbrut ally", + "e ating", + "Ġpill ar", + "ĠS ang", + "ĠJ ude", + "ar ling", + "ĠAm nesty", + "Ġrem inding", + "ĠAdminist rative", + "hes da", + "Ġfl ashed", + "ĠP BS", + "per ate", + "fe ature", + "Ġsw ipe", + "Ġgra ves", + "oult ry", + "26 1", + "bre aks", + "ĠGu er", + "Ġsh rimp", + "ĠV oting", + "qu ist", + "Ġanaly tical", + "Ġtables poons", + "ĠS OU", + "Ġresear ched", + "Ġdisrupt ed", + "Ġj our", + "Ġrepl ica", + "Ġcart oons", + "b ians", + "} )", + "c opy", + "G ot", + "ou ched", + "P UT", + "Ġsw arm", + "not ations", + "s aid", + "Ġreb uilt", + "Ġcollabor ate", + "Ġr aging", + "Ġn ar", + "Ġdem ographics", + "ĠD DR", + "Ġdist rust", + "oss ier", + "ĠK ro", + "Ġpump kin", + "Ġreg rets", + "Ġfatal ities", + "ĠL ens", + "ĠO le", + "p d", + "Ġpupp et", + "ĠOut look", + "ĠSt am", + "O l", + "F air", + "U U", + "Ġre written", + "Ä ±", + "Ġfasc inated", + "Ġve ctors", + "Ġtrib unal", + "u ay", + "ĠM ats", + "ĠCo ins", + "[ [", + "Ġ18 1", + "Ġrend ers", + "ĠK aepernick", + "Ġesp ionage", + "Ġsum m", + "Ġd itch", + "Acc ount", + "Ġspread sheet", + "Ġmut ant", + "p ast", + "40 7", + "Ġd ye", + "Ġinit iation", + "Ġ4 000", + "Ġpunish able", + "Ġth inner", + "ĠKh al", + 
"Ġinter medi", + "D un", + "ĠGoth am", + "Ġeager ly", + "Ġvag inal", + "p owers", + "V W", + "ĠWATCH ED", + "Ġpred ator", + "ams ung", + "Ġdispar ity", + "Ġ[ *", + "Ġam ph", + "Ġout skirts", + "ĠSpir its", + "Ġskelet al", + "Ð »", + "ĠR ear", + "Ġissu ance", + "ĠLog ic", + "re leased", + "Z Z", + "ĠB ound", + "Ent ry", + "Ġex its", + "is ol", + "ĠFound er", + "Ġw re", + "ĠGreen land", + "ĠM MO", + "t aker", + "IN C", + "ãģ ¾", + "Ġhour ly", + "hen ko", + "Ġfantas ies", + "Ġdis ob", + "Ġdemol ition", + "ãĥ ĭ", + "Ġen listed", + "rat ulations", + "Ġmis guided", + "Ġens ured", + "Ġdiscour aged", + "m ort", + "Ġfl ank", + "Ġc ess", + "Ġreact s", + "ĠS ere", + "s ensitive", + "ĠSer pent", + "ass ad", + "Ġ24 7", + "Ġcalm ly", + "b usters", + "Ġble ed", + "ĠSt ro", + "Ġamuse ment", + "ĠAntar ctica", + "Ġs cept", + "ĠG aw", + "a q", + "ason ic", + "Ġsp rawling", + "n ative", + "atur ated", + "ĠBattle field", + "IV ERS", + "E B", + "ĠG ems", + "ĠNorth western", + "ĠFil ms", + "ĠAut omatic", + "Ġappre hend", + "ãģ ¨", + "Ġgui Name", + "Ġback end", + "Ġevid enced", + "ge ant", + "01 2", + "ĠS iege", + "Ġexternal To", + "Ġunfocused Range", + "ĠguiActiveUn focused", + "Ġgui Icon", + "ĠexternalTo EVA", + "ĠexternalToEVA Only", + "F ri", + "ch ard", + "en aries", + "Ġchief s", + "Ġc f", + "ĠH UD", + "Ġcorro bor", + "Ġd B", + "ĠT aken", + "ĠPat ricia", + "ra il", + "ĠCh arm", + "ĠLiber tarian", + "rie ve", + "Person al", + "ĠO UR", + "ger ies", + "Ġdump ing", + "Ġneurolog ical", + "it imate", + "ĠClint ons", + "raft ed", + "ĠM olly", + "Ġtermin als", + "reg ister", + "Ġfl are", + "Ġenc oded", + "Ġautop sy", + "p el", + "m achine", + "Ġexempt ions", + "ĠRoy als", + "d istance", + "Ġdraft s", + "Ġl ame", + "ĠC unning", + "Ġsp ouses", + "ĠMark ets", + "ĠCar rier", + "Ġimp lying", + "ĠY ak", + "s id", + "Ġl oser", + "Ġvigil ant", + "Ġimpe achment", + "Ġaug mented", + "ĠEmploy ees", + "Ġunint ended", + "tern ally", + "ĠW att", + "Ġrecogn izable", + "ess im", + "æ Ŀ", + "Ġco ated", + "r ha", + "Ġlie utenant", + "ĠLegisl ation", + "pub lished", + "44 4", + "01 3", + "Ġide ally", + "ĠPass word", + "Ġsimpl ify", + "ĠMet a", + "ĠM RI", + "Ġple ading", + "organ ized", + "hand ler", + "Ġun ravel", + "cor rect", + "Ġ icy", + "Ġparan oid", + "Ġpass er", + "Ġinspect ions", + "of er", + "ĠHealth care", + "28 3", + "ĠBr ut", + "iol a", + "for ge", + "ĠMed ieval", + "MS N", + "ie vers", + "ĠProgram ming", + "å ī", + "Ġ2 23", + "m u", + "ĠC LE", + "ug a", + "Ġsho ppers", + "Ġinform ative", + "ĠPl ans", + "Ġsupplement ation", + "ĠT ests", + "ty ard", + "ocy tes", + "ĠVeg a", + "ĠGujar at", + "erman ent", + "Ex cept", + "ĠL OT", + "all a", + "ĠC umm", + "ĠO sw", + "Ġven om", + "ĠDeb t", + "ĠD OWN", + "Ġreun ion", + "Ġm uc", + "ĠRel ief", + "Ġge op", + "ĠðŁ ĺ", + "al ogue", + "An th", + "ech o", + "Ġcor ros", + "Ġrepl ication", + "ĠBl azing", + "ĠD aughter", + "Ġinf lic", + "ĠLind sey", + "Ù Ī", + "28 4", + "Ex it", + "Ġgl oom", + "TA IN", + "Ġundermin ing", + "Ġadv ising", + "h idden", + "Ġover flow", + "Ġg or", + "urd ue", + "Ġe choes", + "enh agen", + "Ġimp uls", + "d rug", + "c ash", + "Ġas ync", + "Ġmir ac", + "at ts", + "p unk", + "Ġpiv ot", + "ĠLegisl ative", + "Ġblog gers", + "ĠCl aw", + "s burg", + "d yl", + "ĠRecomm end", + "Ġver te", + "Ġprohib iting", + "ĠPant her", + "Jon athan", + "Ġo min", + "Ġhate ful", + "28 1", + "ĠOr che", + "ĠMurd och", + "down s", + "Ġas ymm", + "G ER", + "Al ways", + "Ġinform s", + "ĠW M", + "ĠP ony", + "ĠApp endix", + "ĠAr lington", + "J am", + "Ġmedic inal", + "ĠS lam", + "IT IES", + 
"Ġre aff", + "ĠR i", + "F G", + "S pring", + "b ool", + "Ġthigh s", + "Ġmark ings", + "ĠRa qqa", + "ĠL ak", + "p oll", + "ts ky", + "ĠMort y", + "ĠDef inition", + "Ġdeb unk", + "end ered", + "ĠLe one", + "a vers", + "Ġmortg ages", + "App arently", + "N ic", + "ha us", + "ĠTh ousands", + "au ld", + "Ġm ash", + "sh oot", + "Ġdi arr", + "Ġconscious ly", + "H ero", + "e as", + "ĠN aturally", + "ĠDestroy er", + "Ġdash board", + "serv ices", + "R og", + "Ġmillenn ials", + "Ġinv ade", + "- (", + "Ġcomm issions", + "ĠA uckland", + "Ġbroadcast s", + "Ġfront al", + "Ġcr ank", + "ĠHist oric", + "Ġrum ours", + "CT V", + "Ġster il", + "Ġboost er", + "rock et", + "ãĤ ¼", + "ut sche", + "ĠP I", + "Ġ2 33", + "ĠProdu cer", + "ĠAnaly tics", + "Ġinval uable", + "Ġunint ention", + "ĠC Y", + "Ġscrut in", + "Ġg igg", + "Ġeng ulf", + "Ġprolet ariat", + "Ġh acks", + "ĠH ew", + "ar ak", + "ĠSl ime", + "ield ing", + "ag her", + "ĠEll iot", + "Ġtele com", + "Ġ2 19", + "ult an", + "ĠAr bor", + "ĠSc outs", + "B an", + "Ġlifes pan", + "Ġbl asp", + "38 8", + "Ġjud iciary", + "ĠContin ental", + "ask ing", + "Mc C", + "L ED", + "Ġbag gage", + "ĠSorce rer", + "Ġrem nants", + "ĠGriff ith", + "ets u", + "ĠSub aru", + "ĠPerson ality", + "des igned", + "ush ima", + "agn ar", + "Ġrec oil", + "Ġpass ions", + "\\ \":", + "Ġte e", + "Ġabol ition", + "ĠCreat ing", + "j ac", + "Ġ19 4", + "01 9", + "Ġpill ars", + "ric hed", + "/ \"", + "t k", + "Ġlive lihood", + "Ġro asted", + "ah on", + "ĠH utch", + "ass ert", + "Ġdivid end", + "Ġkn it", + "Ġd aunting", + "Ġdisturb ance", + "Ġsh ale", + "Ġcultiv ated", + "Ġrefriger ator", + "L B", + "ĠN ET", + "Ġcommercial s", + "Ġthink ers", + "45 5", + "Ġch op", + "B road", + "Ġsuspic ions", + "Ġtag ged", + "l ifting", + "Ġsty lish", + "ĠShield s", + "Short ly", + "Ġt ails", + "A uth", + "ST E", + "ĠG AME", + "Ġse ism", + "ĠK is", + "olog ne", + "Ġcow ork", + "Ġforc ibly", + "Ġthy roid", + "ĠP B", + "AN E", + "mar ried", + "h orse", + "Ġpoly mer", + "ĠCh al", + "od or", + "DE BUG", + "ĠCon text", + "Ġbl iss", + "Ġpin point", + "ĠMat hemat", + "leg ram", + "ĠWeek end", + "Ġlab elled", + "Ġb art", + "it les", + "Ġest rogen", + "âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ", + "\" '", + "Ġvis ibly", + "Ġouts ider", + "aid a", + "Are a", + "Ġdisse min", + "Ġdish onest", + "ĠCl osed", + "ĠBullet in", + "ĠRam sey", + "sw ord", + "ĠX I", + "our ced", + "S ame", + "34 6", + "ĠRe pe", + "ĠK ou", + "c ake", + "em is", + "C ache", + "ĠMe aning", + "ĠEn light", + "onom y", + "Ġmanifest ation", + "sw orth", + "J ay", + "Ġch ore", + "ö r", + "D ream", + "Ġsanction ed", + "Ġcult urally", + "ĠA ra", + "N av", + "Ġthe ological", + "Ġstr ut", + "ĠV O", + "ĠHand book", + "Ġconstruct ing", + "Ġ ¶", + "ĠBenef its", + "ĠPsych ological", + "s ac", + "å ¸", + "p olicy", + "ĠMat ters", + "ĠReport ed", + "ĠBy te", + "Ġvit ro", + "ĠM aiden", + "Ġl am", + "ĠJenn ings", + "Ġgar ment", + "ĠRut gers", + "ĠStaff ord", + "ĠWell ington", + "Ġinter mitt", + "Ġn pm", + "Ġord eal", + "Ġplug ged", + "o oming", + "in ished", + "fram ework", + "Ġtim ber", + "Ġc ass", + "Ġ8 50", + "il ess", + "ĠRed ux", + "7 68", + "St re", + "Ġsurpass ed", + "w hel", + "Ġparalle ls", + "Ġve il", + "ĠG I", + "ĠR EST", + "Ġread iness", + "s ort", + "Ġmod ifying", + "ĠSl ate", + "ru ff", + "Ġmar ble", + "Ġinf rared", + "Ġaud itor", + "ĠFANT ASY", + "ĠP overty", + "ĠS PD", + "Ġ\" (", + "K y", + "RA Y", + "Ġexecut ions", + "ĠBever ly", + "ĠMarx ism", + "ĠBur st", + "ĠK ali", + "est ones", + "Clear ly", + "E ll", + "ãģ §", + "ĠProceed ings", + "T oken", 
+ "IF IC", + "ñ a", + "Cent ral", + "ĠH aley", + "ĠD rama", + "Ġform ations", + "OR N", + "Book s", + "Ġdom inating", + "ĠFly ers", + "ĠCompan ion", + "Ġdiscipl ined", + "ĠYug oslav", + "ĠSpell s", + "Ġv engeance", + "Ġland lords", + "L en", + "ĠO gre", + "ano ia", + "Ġpier cing", + "Ġcon greg", + "Ġscore r", + "ob ia", + "Ġnic kel", + "ĠLear ns", + "Ġre jo", + "Ġmaster piece", + "Fl ash", + "Ġinhab ited", + "ĠOpen GL", + "ĠD ud", + "ĠI CO", + "Ġar ter", + "Ġpl ur", + "Ġmaster y", + "Ġlong standing", + "st ed", + "Ġw ines", + "Ġtelev ised", + "ĠSh rine", + "ĠBay ern", + "Ġâ ĵĺ", + "Ġencl osure", + "j ohn", + "Ġprophe ts", + "ĠRes urrection", + "ĠOrd ers", + "Ġun even", + "r als", + "Ġd wind", + "ĠL ah", + "ĠSl oven", + "37 8", + "Ġins istence", + "aff le", + "ĠCl one", + "Ġhard ship", + "ĠCongress man", + "Ġple ad", + "Ġreview ers", + "Ġc ured", + "Ġ19 35", + "as ley", + "f ake", + "ĠTh inking", + "yd ia", + "P ART", + "ĠD ota", + "o it", + "Ġwh ipped", + "Ġb ouncing", + "ĠHispan ics", + "com ings", + "Ġcann abin", + "ĠCh ambers", + "ĠZ ack", + "Option al", + "Ġco ats", + "Ġprow ess", + "ĠNort on", + "Ġplain ly", + "Ġfre ight", + "Ġinhib ition", + "Ġcl am", + "Ġ30 3", + "ke f", + "ale igh", + "L uke", + "Ġpsych o", + "ator ium", + "M ED", + "Ġtreat ies", + "Ġind isc", + "Ġd c", + "OP S", + "Ġresil ient", + "ĠInter state", + "Ġsl ack", + "Ġmund ane", + "Ġestab lishes", + "35 9", + "Ġstr ained", + "Ġn ond", + "S us", + "Ġcast e", + "ar ate", + "ie ving", + "Ġunfair ly", + "Ġpars er", + "on ial", + "urs ive", + "V ia", + "ĠOtt o", + "ĠAuthor ities", + "stro ke", + "K R", + "ĠMer cy", + "Ġfurn ished", + "Ġout set", + "Ġmet ic", + "19 82", + "olith ic", + "ĠT ent", + "og ical", + "ĠA ircraft", + "Ġh ides", + "ĠBec ame", + "Ġeduc ators", + "re aching", + "Ġvol atility", + "Ġtodd ler", + "ĠNAS CAR", + "ĠTw elve", + "ĠHigh lights", + "Ġgra pe", + "Ġspl its", + "Ġpe asant", + "Ġre neg", + "ĠMS I", + "Tem p", + "st ars", + "Ġtre k", + "ĠHy de", + "b inding", + "Ġreal ism", + "Ġox ide", + "ĠH os", + "Ġmount s", + "Ġbit ing", + "Ġcollaps ing", + "Ġpost al", + "Ġmuse ums", + "Ġdet ached", + "Ġrespect ing", + "Ġmonop ol", + "Ġwork flow", + "ĠC ake", + "Tem plate", + "ĠOrgan isation", + "Ġpers istence", + "36 9", + "C oming", + "B rad", + "Ġredund ant", + "ĠG TA", + "Ġb ending", + "Ġrev oked", + "Ġoff ending", + "Ġfram ing", + "Ġprint f", + "Comm un", + "mem bers", + "Out side", + "Ġconst rued", + "Ġc oded", + "F ORE", + "Ġch ast", + "Ch at", + "Ind ian", + "ĠY ard", + "? 
!\"", + "ĠP orts", + "ĠX avier", + "ĠR ET", + "' .\"", + "ĠBo at", + "iv ated", + "ich t", + "umer able", + "D s", + "ĠDun n", + "Ġcoff in", + "Ġsecure ly", + "ĠRapt ors", + "ĠB es", + "Install ation", + "Ġin ception", + "ĠHealth y", + "end ants", + "Ġpsych ologists", + "ĠShe ikh", + "c ultural", + "ĠBlack Berry", + "sh ift", + "F red", + "oc he", + "Ġc akes", + "ĠS EO", + "ĠG ian", + "ĠAs ians", + "og ging", + "e lement", + "Ġpund its", + "ĠV augh", + "ĠG avin", + "Ġh itter", + "Ġdrown ed", + "Ġch alk", + "ĠZ ika", + "Ġmeas les", + "80 2", + "âĢ¦ ..", + "ĠAW S", + "] \"", + "Ġdist ort", + "ĠM ast", + "Ġantib odies", + "ĠM ash", + "Mem ory", + "ĠUg anda", + "ĠPro b", + "Ġvom iting", + "ĠTurn s", + "Ġoccup ying", + "Ġev asion", + "ĠTher apy", + "Ġprom o", + "Ġelect r", + "Ġblue print", + "ĠD re", + "pr iced", + "ĠDep ot", + "Ġallev iate", + "ĠSom ali", + "m arg", + "n ine", + "Ġnostalg ia", + "ĠShe pherd", + "Ġcaval ry", + "Ġtor ped", + "ĠBlood y", + "x b", + "Ġs ank", + "Ġgo alt", + "report print", + "embed reportprint", + "clone embedreportprint", + "ĠIn itially", + "ĠF ischer", + "Ġnot eworthy", + "c ern", + "Ġin efficient", + "raw download", + "rawdownload cloneembedreportprint", + "c ation", + "ĠD ynasty", + "l ag", + "D ES", + "Ġdistinct ly", + "ĠEston ia", + "Ġopen ness", + "Ġg ossip", + "ru ck", + "W idth", + "ĠIb rahim", + "Ġpet roleum", + "Ġav atar", + "ĠH ed", + "ath a", + "ĠHog warts", + "Ġc aves", + "67 8", + "Ġsafegu ard", + "ĠM og", + "iss on", + "ĠDur ham", + "sl aught", + "ĠGrad uate", + "Ġsub conscious", + "ĠEx cellent", + "ĠD um", + "---- -", + "Ġp iles", + "ĠW ORK", + "ĠG arn", + "ĠF ol", + "ĠAT M", + "Ġavoid s", + "ĠT ul", + "Ġble ak", + "EL Y", + "iv ist", + "light ly", + "P ers", + "ĠD ob", + "ĠL S", + "Ġins anity", + "Î µ", + "atal ie", + "En large", + "Ġtw ists", + "Ġfault y", + "Ġpir acy", + "Ġimp over", + "Ġrug ged", + "ĠF ashion", + "Ġs ands", + "' ?", + "sw ick", + "Ġn atives", + "Ġhe n", + "ĠNo ise", + "ãĥ Ĺ", + "Ġg reens", + "Ġfree zer", + "Ġd ynasty", + "ĠFather s", + "ĠNew ark", + "Ġarchae ological", + "Ġo t", + "ob ar", + "Ġblock ade", + "Ġall erg", + "L V", + "Ġdeb it", + "ĠR FC", + "ĠMil ton", + "ĠPress ure", + "Ġwill ingly", + "Ġdisproportion ate", + "Ġopp ressive", + "Ġdiamond s", + "Ġbelong ings", + "19 70", + "Ġbell s", + "Ġimperial ism", + "Ġ2 27", + "Ġexpl oding", + "ĠE clipse", + "Ġ19 19", + "Ġr ant", + "Ġnom inations", + "34 7", + "Ġpeace fully", + "ric a", + "ĠF UCK", + "Ġvib ration", + "mal ink", + "Ġro pes", + "ĠIv anka", + "ĠBrew ery", + "ĠBook er", + "ĠOw ens", + "go ers", + "Serv ices", + "ĠSn ape", + "Ġ19 1", + "39 5", + "Ġ2 99", + "just ice", + "Ġb ri", + "Ġdisc s", + "Ġprom inently", + "Ġvul gar", + "Ġsk ipping", + "l ves", + "Ġtsun ami", + "37 4", + "ĠU rug", + "ĠE id", + "rec ated", + "p hen", + "Ġfault s", + "ĠStart ed", + "9 50", + "Ġp i", + "Ġdetect or", + "Ġbast ard", + "Ġvalid ated", + "Space Engineers", + "OUR CE", + "Ġ( ~", + "Ġuns ur", + "Ġaff irmed", + "Ġfasc ism", + "Ġres olving", + "ĠCh avez", + "ĠC yn", + "Ġdet ract", + "L ost", + "Ġrig ged", + "Ġhom age", + "ĠBrun o", + "55 5", + "ec a", + "Ġpress es", + "Ġhum our", + "Ġsp acing", + "Ġ' /", + "olk ien", + "C oun", + "OP ER", + "T re", + "S on", + "ĠCambod ia", + "ier re", + "m ong", + "o zy", + "Ġliquid ity", + "ĠSov iets", + "ĠFernand o", + "Ġ2 29", + "Ġsl ug", + "ĠCatal an", + "elect ric", + "Ġsc enery", + "ĠH earth", + "Ġconst rained", + "Ġgoal ie", + "ĠGu idelines", + "ĠAm mo", + "ĠPear son", + "Ġtax ed", + "Ġfet us", + "Resp onse", + "ĠAlex is", + "th ia", + "G uy", + 
"Ġrecon struct", + "Ġextrem es", + "Ġconclud ing", + "ĠP eg", + "ook s", + "Ġded uctions", + "R ose", + "Ġground breaking", + "ĠT arg", + "ãĥ ģ", + "ĠRe ve", + "res ource", + "Ġmo ons", + "Ġelectrom agnetic", + "Ġamid st", + "ĠVik tor", + "N ESS", + "B ACK", + "Ġcomm ute", + "ĠAna heim", + "Ġfluct uations", + "6 40", + "Ġnood les", + "ĠCop enhagen", + "ĠT ide", + "ĠGri zz", + "ĠS EE", + "Ġpip elines", + "Ġsc ars", + "end o", + "ag us", + "ĠE TF", + "/ #", + "ĠBec ome", + "44 8", + "Ġvis c", + "ĠRecomm ended", + "Ġj umper", + "Ġcogn ition", + "Ġassass in", + "Ġwitness ing", + "ĠSet up", + "Ġl ac", + "v im", + "IS M", + "p ages", + "SS L", + "35 8", + "Ġad ject", + "indust rial", + "l ore", + "cher y", + "Ġgl itter", + "Ġc alf", + "Flor ida", + "Ġspoil ers", + "Ġsucceed s", + "Ġch anting", + "Ġslog ans", + "ĠTr acy", + "Vis it", + "rol ogy", + "Ġm ornings", + "Ġline age", + "Ġs ip", + "Ġintense ly", + "Ġflour ish", + "ĠSle eping", + "ĠF em", + "or por", + "ĠK lan", + "ĠDar th", + "h ack", + "ĠNi elsen", + "Ġtum ors", + "Ġprocure ment", + "ĠY orkshire", + "Ġra ided", + "K Y", + "An na", + "Ġ// [", + "ĠDis order", + "ĠMust ang", + "ĠW en", + "ĠTry ing", + "s q", + "Ġdeliver ies", + "Ġshut ter", + "Ġcere bral", + "Ġbip olar", + "ĠC N", + "l ass", + "j et", + "Ġdeb ating", + "> :", + "Ġe agle", + "gr ades", + "ĠD ixon", + "UG C", + "M AS", + "ĠDr aco", + "ĠMach ines", + "aff er", + "Ġem an", + " ²", + "pr on", + "ĠG ym", + "Ġcompar atively", + "ĠTrib unal", + "PR O", + "Ġle x", + "Ġfert ile", + "Ġdep ressing", + "Ġsuperf icial", + "ess ential", + "ĠHun ters", + "g p", + "Ġprom inence", + "L iber", + "ĠAn cest", + "ote chnology", + "Ġm ocking", + "ĠTra ff", + "ĸ ļ", + "Med ium", + "I raq", + "Ġpsychiat rist", + "Quant ity", + "ĠL ect", + "Ġno isy", + "5 20", + "G Y", + "Ġsl apped", + "ĠM TV", + "Ġpar a", + "p ull", + "Mult iple", + "as her", + "Ġn our", + "ĠSe g", + "Spe ll", + "v ous", + "ord ial", + "Sen ior", + "ĠGold berg", + "ĠPl asma", + "ne ed", + "Ġmess enger", + "ere t", + "Ġteam ed", + "Ġliter acy", + "ĠLe ah", + "ĠD oyle", + "Ġem itted", + "U X", + "Ġev ade", + "Ġm aze", + "Ġwrong ly", + "ĠL ars", + "Ġstere otype", + "Ġpled ges", + "Ġarom a", + "ĠM ET", + "Ġac re", + "ĠO D", + "Ġf f", + "Ġbrew eries", + "ĠH ilton", + "und le", + "ĠK ak", + "ĠThank fully", + "ĠCan ucks", + "in ctions", + "ĠApp ears", + "Ġco er", + "Ġundermin ed", + "ro vers", + "And re", + "Ġbl aze", + "um ers", + "Ġfam ine", + "amp hetamine", + "ulk an", + "Am ount", + "Ġdesper ation", + "wik ipedia", + "develop ment", + "ĠCor inth", + "uss ia", + "Jack son", + "L I", + "N ative", + "R s", + "Oh io", + "ĠKath leen", + "F ortunately", + "Ġattend ant", + "ĠPre ferred", + "ĠDid n", + "ĠV s", + "M is", + "Ġrespond ent", + "Ġb oun", + "st able", + "Ġp aved", + "Ġunex pl", + "ĠChe ney", + "L M", + "ĠC ull", + "bl own", + "Ġconfront ing", + "oc ese", + "serv ing", + "W i", + "ĠLith uania", + "ann i", + "Ġst alk", + "h d", + "Ġv ener", + "AP H", + "ynchron ous", + "UR R", + "um ably", + "hist oric", + "H alf", + "H ay", + "Ġresil ience", + "spe ction", + "Ġabandon ing", + "O bs", + "ĠDeb bie", + "Ġgrad ient", + "ĠPl aint", + "ĠCan al", + "AR CH", + "Ġexpans ive", + "Ġfun g", + "Ġb ounced", + "U nd", + "Ġprec autions", + "Ġclar ification", + "Ġd agger", + "Ġgri ps", + "Ġ µ", + "ĠRiver a", + "ĠUnd ead", + "is ites", + "ĠFIR ST", + "ñ o", + "aud i", + "Ġhost ages", + "Ġcompl iant", + "Ġal umni", + "Se ven", + "Ġcyber security", + "e ither", + "Col lect", + "Ġinvari ably", + "ĠS oci", + "Ġlaw maker", + "Ġa le", + "ĠPerson ally", + 
"N azi", + "Ġcustom ization", + "ĠPro c", + "ĠSask atchewan", + "eat uring", + "Ġsp ared", + "Ġdiscontin ued", + "Ġcomput ational", + "ĠMotor ola", + "Ġsuprem acist", + "government al", + "Ġparad ise", + "ĠDown ing", + "ĠNik on", + "Ġcat alyst", + "ber ra", + "Tor onto", + "8 75", + "bet a", + "ĠMac ron", + "Ġunreal istic", + "ve ctor", + "ĠVeh icles", + "it iveness", + "ĠR V", + "ĠCol bert", + "s in", + "o ji", + "ent in", + "ĠKr ish", + "hell o", + "ff ield", + "ok y", + "ĠT ate", + "Ġmap le", + "Ġa ids", + "chem ical", + "33 4", + "n uts", + "ĠWar p", + "Ġx x", + "ĠRob b", + "umer ous", + "_- _", + "ft ime", + "ĠV W", + "Ġw inger", + "ĠD ome", + "t ools", + "ĠP V", + "ĠGe orgetown", + "Ġg eared", + "Ġjihad ists", + "Ġc p", + "Ġster oids", + "M other", + "cler osis", + "ĠDR M", + "nes ia", + "Ġl inger", + "Ġimm ersive", + "ĠC OUN", + "Ġoutwe igh", + "ens ual", + "B and", + "Ġtransform s", + "mat ched", + "ps ons", + "ĠJud icial", + "f actor", + "Ġrefer ral", + "Ġodd ly", + "ĠW enger", + "B ring", + "ĠB ows", + "60 2", + "IC LE", + "Ġl ions", + "ĠAcad emic", + "ĠTh orn", + "ĠRa ider", + "kef eller", + "St orage", + "L ower", + "ĠOr t", + "ĠEqu ality", + "AL T", + "ĠS OC", + "T ypes", + "Ġl yn", + "ĠAss et", + "co at", + "TP P", + "C VE", + "ĠPione er", + "app lication", + "Mod ern", + "ĠH K", + "En vironment", + "Al right", + "R ain", + "IP P", + "ĠShi ite", + "Ġm ound", + "ĠAb ilities", + "cond ition", + "St aff", + "Ġcompet ence", + "ĠM oor", + "ĠDi ablo", + "Ġwith held", + "Ġost ensibly", + "ĠB rom", + "Ġms g", + "Ġden omin", + "ĠRef erences", + "ĠF P", + "Ġplun ged", + "Ġp amph", + "m oving", + "cent ral", + "Ġdown right", + "Ġf ading", + "T al", + "T yp", + "ĠTh y", + "uk es", + "it he", + "Ġo ve", + "Ġbatt led", + "Ġseaf ood", + "Ġfig ur", + "ĠR D", + "c rop", + "Ġsqu ads", + "{ \\", + "à ¹", + "ĠE h", + "Ġinterview ing", + "ĠQ in", + "Ġas piring", + "PL IC", + "Ġcla uses", + "ĠG ast", + "ĠN ir", + "Ġl uggage", + "Ġh ose", + "Ġsystem d", + "Ġdesc ending", + "ĠRev ised", + "ĠR ails", + "al ign", + "70 9", + "33 7", + "Ġf ug", + "charg ing", + "t ags", + "Ġut er", + "k ish", + "WAR NING", + "49 0", + "prof its", + "Ġvoy age", + "Ġa ce", + "ĠV anguard", + "ĠT anks", + "ĠM uk", + "Ġ2 26", + "S afe", + "Ar mor", + "Ġvolcan ic", + "Ġwom b", + "ĠM IL", + "Ġbegin ner", + "ĠRec ogn", + "ĠA AP", + "PL AY", + ") !", + "Ġdetect ing", + "c n", + "Ġbre aches", + "Bas ically", + "ĠP ag", + "ĠMunicip al", + "ĠInd ie", + "ĠL af", + "ĠDis able", + "ĠOl son", + "Ġrest rained", + "Ġrul ings", + "Ġhum ane", + "ev ents", + "ĠCinem a", + "display Text", + "ĠH atch", + "action Date", + "onna issance", + "Ġassault ing", + "ĠL ug", + "CH AT", + "Ġvig orous", + "ĠPer se", + "Ġintoler ance", + "ĠSnap chat", + "ĠSh arks", + "Ġd ummy", + "ĠDi agn", + "ĠGu itar", + "im eters", + "40 3", + "RE G", + "A x", + "Ġsepar ates", + "ĠMah m", + "Ġt v", + "j ah", + "O OL", + "C irc", + "ĠWinds or", + "uss ian", + "Ġintu ition", + "Ġdis dain", + "ĠDon ovan", + "Ġ2 21", + "E mb", + "Ġcondem ning", + "Ġgener osity", + "zz y", + "Ġpant ies", + "ĠPre vent", + "Action Code", + "AN A", + "34 2", + "external ActionCode", + "Ġspec ifying", + "Ġcryst all", + "J ere", + "Ġru pt", + "ĠApp rentice", + "Ġprof iling", + "Ð º", + "St rike", + "Ġsid eline", + "Ġoblig ated", + "Ġocc ult", + "Ġbureaucr atic", + "ant ically", + "rupt ed", + "neg ative", + "ĠEthiop ia", + "ĠC ivic", + "Ġins iders", + "el igible", + "ĠTV s", + "ĠB AR", + "ĠT I", + "i ologist", + "ĠA IR", + "Ġsubstit uted", + "Ar ab", + "ĠS aul", + "ĠY og", + "p rem", + "Ġbuild 
ers", + "Ġstation ary", + "Ġdoubt ful", + "Ġvig orously", + "Ġthr illing", + "Ph ysical", + "ĠCare y", + "ĠHyd ra", + "geon ing", + "ĠS ly", + "y ton", + "Ġborrow ers", + "ĠPark inson", + "Ġ ë", + "ĠJama ica", + "Ġsat ir", + "Ġinsurg ents", + "ĠF irm", + "Ġis ot", + "ĠK arn", + "our ning", + "ak ens", + "doc s", + "l ittle", + "ĠMon aco", + "CL ASS", + "Tur key", + "L y", + "ĠCon an", + "ass ic", + "Ġstar red", + "ĠPac ers", + "et ies", + "Ġt ipping", + "M oon", + "ĠR w", + "s ame", + "Ġcav ity", + "Ġgo of", + "ĠZ o", + "Sh ock", + "um mer", + "Ġemphas izes", + "Ġreg rett", + "Ġnovel ty", + "Ġen vy", + "ĠPass ive", + "r w", + "50 5", + "Ġind ifferent", + "ĠR ica", + "ĠHim self", + "ĠFred die", + "Ġad ip", + "ä¸ Ģ", + "Ġbreak out", + "Ġhur ried", + "ĠHu ang", + "ĠD isk", + "Ġro aming", + "?????- ?????-", + "U V", + "ĠRick y", + "ĠS igma", + "Ġmarginal ized", + "Ġed its", + "Ġ30 4", + "mem ory", + "Ġspec imen", + "29 3", + "ãģ ¯", + "Ġvert ically", + "Ġaud ition", + "ĠHe ck", + "Ġc aster", + "ĠHold ings", + "ad al", + "ĠC ron", + "ĠL iam", + "Ġdef lect", + "P ick", + "ĠDeb ug", + "RE F", + "Ġvers atility", + "ot hes", + "class ified", + "ĠMah ar", + "ĠH ort", + "C ounter", + "st asy", + "not iced", + "33 1", + "ĠSh im", + "f uck", + "ĠB ie", + "Ġair ing", + "ĠPro tein", + "ĠHold ing", + "Ġspect ators", + "ili ated", + "ĠThat cher", + "n osis", + "ãĥ¼ ãĥ³", + "Te le", + "B oston", + "ĠTem pl", + "st ay", + "Ġdecl arations", + "47 9", + "Vol ume", + "ĠDesign er", + "ĠOver watch", + "id ae", + "Ġon wards", + "Ġn ets", + "ĠMan ila", + "part icularly", + "Ġpolit ic", + "o other", + "Ġport raits", + "Ġpave ment", + "c ffff", + "Ġs aints", + "Ġbegin ners", + "ES PN", + "Ġshort comings", + "âķIJ âķIJ", + "Ġcom et", + "ĠOrgan ic", + "qu el", + "Ġhospital ized", + "Bre ak", + "Ġpe el", + "dyl ib", + "asp x", + "ur ances", + "ĠT IM", + "P g", + "Ġread able", + "ĠMal ik", + "Ġm uzzle", + "Ġbench marks", + "d al", + "ĠV acc", + "ĠH icks", + "60 9", + "ĠB iblical", + "he ng", + "Ġover load", + "ĠCivil ization", + "Ġimm oral", + "Ġf ries", + "ãĤ Ĵ", + "Ġreprodu ced", + "Ġform ulation", + "j ug", + "ire z", + "g ear", + "Ġco ached", + "Mp Server", + "ĠS J", + "ĠK w", + "In it", + "d eal", + "ĠO ro", + "ĠL oki", + "ĠSong s", + "Ġ23 2", + "ĠLou ise", + "asion ally", + "Ġunc ond", + "olly wood", + "Ġprogress ives", + "ĠEn ough", + "ĠDo e", + "Ġwreck age", + "Ġbr ushed", + "ĠBase Type", + "Ġz oning", + "ish able", + "het ically", + "ĠC aucus", + "ĠH ue", + "Ġk arma", + "ĠSport ing", + "Ġtrad er", + "Ġseem ing", + "ĠCapt ure", + "4 30", + "b ish", + "Ġt unes", + "Ġindo ors", + "ĠSp here", + "ĠD ancing", + "TER N", + "Ġno b", + "ĠG ST", + "m aps", + "Ġpe ppers", + "F it", + "Ġoverse es", + "ĠRabb i", + "ĠR uler", + "vert ising", + "off ice", + "xx x", + "Ġra ft", + "Ch anged", + "Ġtext books", + "L inks", + "ĠO mn", + "ãĢ ij", + "Ġinconven ience", + "ĠDon etsk", + "= ~", + "Ġimplicit ly", + "Ġboost s", + "ĠB ones", + "ĠBo om", + "Cour tesy", + "Ġsens ational", + "AN Y", + "Ġgre edy", + "ed en", + "Ġinex per", + "ĠL er", + "ĠV ale", + "Ġtight en", + "ĠE AR", + "ĠN um", + "Ġancest or", + "S ent", + "ĠH orde", + "urg ical", + "all ah", + "Ġsa p", + "amb a", + "ĠSp read", + "tw itch", + "Ġgrand son", + "Ġfract ure", + "Ġmoder ator", + "ĠSe venth", + "ĠRe verse", + "Ġestim ation", + "Cho ose", + "Ġpar ach", + "Ġbar ric", + "ãĢ IJ", + "Ġcomp ass", + "Ġall ergic", + "âĢ ķ", + "OT HER", + "err illa", + "Ġw agon", + "Ġz inc", + "Ġrub bed", + "ĠFull er", + "ĠLuxem bourg", + "ĠHoo ver", + "Ġli ar", + "ĠEven ing", + "ĠCob 
b", + "est eem", + "Ġselect or", + "ĠB rawl", + "is ance", + "ĠE k", + "Ġtro op", + "Ġg uts", + "ĠApp eal", + "ĠTibet an", + "Ġrout ines", + "ĠM ent", + "Ġsummar ized", + "steam apps", + "Ġtr anqu", + "Ġ19 29", + "or an", + "ĠAut hent", + "Ġg maxwell", + "Ġappre hens", + "Ġpo ems", + "Ġsa usage", + "ĠWeb ster", + "ur us", + "Ġthem ed", + "Ġl ounge", + "Ġcharg er", + "Sp oiler", + "Ġsp illed", + "h og", + "ĠSu nder", + "ĠA in", + "ĠAng ry", + "Ġdis qual", + "ĠFrequ ency", + "ĠEther net", + "Ġhel per", + "Per cent", + "Ġhorr ifying", + "Ġa il", + "ĠAll an", + "EE E", + "ĠCross ing", + "44 9", + "Ġh olog", + "ĠPuzz les", + "ĠGo es", + "eren n", + "60 4", + "ãģ ı", + "ĠRaf ael", + "Ġatt en", + "ĠE manuel", + "Ġup ro", + "ĠSus p", + "P sych", + "ĠTr ainer", + "ĠN ES", + "ĠHun ts", + "bec ue", + "Ġcounsel or", + "R ule", + "Ġtox ins", + "Ġb anners", + "r ifice", + "Ġgreet ing", + "Ġfren zy", + "Ġall ocate", + "Ġ* )", + "ex pr", + "50 3", + "ĠCh ick", + "ĠT orn", + "Ġconsolid ation", + "ĠF letcher", + "sw itch", + "fr ac", + "cl ips", + "ĠMcK in", + "ĠLun ar", + "Mon th", + "IT CH", + "Ġscholar ly", + "rap ed", + "39 8", + "Ġ19 10", + "Ġe greg", + "Ġin secure", + "Ġvict orious", + "cffff cc", + "Ġsing led", + "Ġel ves", + "ĠW ond", + "bur st", + "Ġcam oufl", + "ĠBL ACK", + "Ġcondition ed", + "ç ī", + "ans wered", + "Ġcompuls ory", + "asc ist", + "Ġpodcast s", + "ĠFrank furt", + "bn b", + "Ġne oliberal", + "ĠKey board", + "ĠBel le", + "w arm", + "Ġtrust s", + "Ġins ured", + "ĠBu cc", + "us able", + "60 7", + "ĠPl ains", + "Ġ18 90", + "Ġsabot age", + "Ġlod ged", + "f elt", + "Ġg a", + "ĠN arc", + "ĠSal em", + "Ġsevent y", + "ĠBl ank", + "p ocket", + "Ġwhis per", + "Ġm ating", + "om ics", + "ĠSal man", + "ĠK ad", + "Ġan gered", + "Ġcoll isions", + "Ġextraord inarily", + "Ġcoerc ion", + "G host", + "b irds", + "è Ģ", + "k ok", + "Ġper missible", + "avor able", + "Ġpo inters", + "Ġdiss ip", + "ac i", + "Ġtheat rical", + "ĠCos mic", + "Ġforget ting", + "Ġfinal ized", + "å¤ §", + "y out", + "l ibrary", + "Ġbo oming", + "ĠBel ieve", + "ĠTe acher", + "ĠL iv", + "ĠGOOD MAN", + "ĠDomin ican", + "OR ED", + "ĠPart ies", + "Ġprecip itation", + "ĠSl ot", + "R oy", + "ĠComb ined", + "Ġinteg rating", + "Ġch rome", + "Ġintest inal", + "ĠRe bell", + "Ġmatch ups", + "Ġblock buster", + "ĠLore n", + "ĠLe vy", + "Ġpre aching", + "ĠS ending", + "ĠPur pose", + "ra x", + "f if", + "Ġauthor itative", + "ĠP ET", + "ast ical", + "Ġdish on", + "Ġchat ting", + "Ġ\"$ :/", + "Connect ion", + "Ġrecre ate", + "Ġdel inqu", + "Ġbro th", + "ĠD irty", + "ĠAd min", + "z man", + "Ġscholars hips", + "Ġ25 3", + "cont act", + "als a", + "7 67", + "c reen", + "abb age", + "Ġ19 15", + "Ġbl ended", + "Ġal armed", + "L anguage", + "35 6", + "Ġbl ends", + "ĠCh anged", + "W olf", + "Ġhe pat", + "Creat ing", + "Ġper secut", + "Ġsweet ness", + "art e", + "Ġforfe iture", + "ĠRober to", + "im pro", + "N FL", + "ĠMag net", + "Det ailed", + "Ġinsign ificant", + "ĠPOL IT", + "ĠBB Q", + "ĠC PS", + "Ġse aw", + "amin er", + "m L", + "end if", + "f inals", + "Ġ26 5", + "u ish", + "Ġ} )", + "ĠPro blems", + "Ġem blem", + "Ġserious ness", + "Ġpars ing", + "Ġsubst itution", + "Ġpress ured", + "Ġrecy cled", + "ale b", + "Rub y", + "Ġprof iciency", + "Dri ver", + "ĠW ester", + ": '", + "AF TA", + "Ġm antle", + "ĠClay ton", + "fl ag", + "Ġpractition er", + "c overed", + "ĠSt ruct", + "add afi", + "4 25", + "ĠTown ship", + "ĠHyd ro", + "Lou is", + "34 3", + "Ġcond o", + "ĠT ao", + "Ġutil ization", + "Ġnause a", + "ĠDem s", + "rid ges", + "p ause", + "Ġform ulas", 
+ "Ġchall enger", + "37 6", + "Ġdefect ive", + "ĠRail way", + "ĠPub Med", + "Ġyog urt", + "l bs", + "ĠNor folk", + "OP E", + "ĠMood y", + "Ġdistribut or", + "Ġscroll s", + "Ġextract s", + "St an", + "Ġv iability", + "Ġexp oses", + "Ġstar vation", + "ĠStep s", + "ĠD odd", + "f ew", + "ST D", + "33 2", + "Ġclos ures", + "Ġcomplement ary", + "ĠS asha", + "ump y", + "Ġmon et", + "Ġartic ulate", + "ĠDo ct", + "k iller", + "Ġsc rim", + "Ġ2 64", + "Ġprost itutes", + "Ġse vered", + "Ġattach ments", + "Ġcool ed", + "L ev", + "ĠF alk", + "f ail", + "Ġpolic eman", + "ĠD ag", + "Ġpray ed", + "ĠK ernel", + "Ġcl ut", + "Ġc ath", + "Ġan omaly", + "St orm", + "em aker", + "ĠBreak fast", + "ul i", + "o ire", + "J J", + "h z", + "Oper ation", + "ĠS ick", + "35 4", + "ĠGuatem ala", + "R ate", + "Ġexp osures", + "f aces", + "ĠArch ae", + "ra f", + "ĠM ia", + "Ġ20 25", + "Ġop aque", + "Ġdisgu ised", + "ĠHead quarters", + "S ah", + "Ġp ots", + "9 78", + "ĠM alf", + "Ġfrown ed", + "Ġpoison ous", + "ĠCon vers", + "ee ks", + "Ġcr ab", + ".\" \"", + "Ġtre ason", + "Ġr anc", + "Ġescal ating", + "Ġwar r", + "Ġmob s", + "Ġl amps", + "ĠSun shine", + "ĠBrun swick", + "Ph ones", + "Ġspe lled", + "ĠSk ip", + "Ġ20 50", + "Ġ19 11", + "ĠPl uto", + "ĠAm end", + "Ġme ats", + "38 7", + "Ġst omp", + "ĠZh ou", + "ĠLevi athan", + "ĠHaz ard", + "ad v", + "ĠOr well", + "Ġal oud", + "Ġb umper", + "ĠAn arch", + "ub untu", + "ĠSer ious", + "f itting", + "ĠOption al", + "ĠCec il", + "RE AM", + "Ġser otonin", + "Ġcultiv ate", + "ag ogue", + "} \\", + "Ġmos ques", + "ĠSun ny", + "Ġre active", + "rev olution", + "ĠL up", + "ĠFed ora", + "Ġdefense man", + "ĠV ID", + "ist ine", + "Ġdrown ing", + "ĠBroad casting", + "Ġthr iller", + "ĠS cy", + "Ġacceler ating", + "Ġdirect s", + "od ied", + "b ike", + "d uration", + "Ġpain fully", + "R edd", + "Ġproduct ions", + "Ġg ag", + "Ġwh ist", + "Ġs ock", + "Ġinf initely", + "ĠConc ern", + "ĠCit adel", + "Ġlie u", + "Ġcand les", + "ogene ous", + "arg er", + "Ġheaven ly", + "inflamm atory", + "Per formance", + "C s", + "ruct ose", + "az aki", + "Ġp essim", + "Ġinf erence", + "Ġpow d", + "ĠZ oe", + "Ġpain ts", + "Ġd azz", + "pt a", + "-------- ---", + "Ġins pir", + "ĠExper imental", + "ĠKn ife", + "reg or", + "b ors", + "Ġshow ers", + "rom eda", + "Ġs aint", + "Ġben ign", + "ĠJ iang", + "Ġenvision ed", + "Ġsh roud", + "IF T", + "H O", + "Ġsh uff", + "ĠI CC", + "Ġse greg", + "Ġrevis it", + "ighth ouse", + "L i", + "Ġsub strate", + "ĠSe as", + "ĠRew ard", + "ĠH ep", + "ĠBr ass", + "s bm", + "Ġelim inates", + "Ġst amina", + "ĠV AT", + "ĠLo an", + "Ġconst raint", + "Ġappropri ated", + "Ġp es", + "ĠA LE", + "r anging", + "Ġ40 4", + "39 2", + "Ġintellectual s", + "ach u", + "Ġrestruct uring", + "ĠLe vin", + "Ġrun es", + "Ġdelight ful", + "Ġcarbohyd rates", + "ĠMod els", + "ĠExp o", + "Ġtransport ing", + "all oc", + "Ġring ing", + "S amsung", + "Ġscarce ly", + "ĠURL s", + "ĠM AS", + "Ġprot otypes", + "Ġnarr ator", + "ĠCPU s", + "cd n", + "ĠBart on", + "Ġdecided ly", + "ĠSh u", + "ix ir", + "oc ious", + "ĠMy st", + "N intendo", + "Ġre use", + "Ġforg iven", + "F ew", + "in ical", + "n at", + "Ġseam less", + "ĠEv a", + "ĠE VE", + "ĠJ O", + "land ers", + "Ġso fter", + "neg ie", + "Ġtrans ient", + "Ġorb ital", + "Ġfulf il", + "ĠK om", + "Hop efully", + "Ġdynam ically", + "ĠHun ger", + "å Ľ", + "ĠArmen ia", + "el man", + "ber to", + "Ġp ige", + "ĠID s", + "lim it", + "Ġve ins", + "Ġso aring", + "p acks", + "Gold en", + "ĠCr ab", + "ist or", + "ĠR PM", + "Ġ$ $", + "g ression", + "Ġjihad ist", + "Ġgam ble", + "Ġcare g", 
+ "Ġinf lated", + "F ace", + "ĠFire arms", + "ĠEm manuel", + "â Ŀ", + "Ġsh ocks", + "gr ab", + "Ġspl end", + "ĠHP V", + "ab ortion", + "Ab ove", + "Ent ity", + "play ers", + "Ġcomm enced", + "ul ence", + "Ġfulfill ment", + "Ġembod iments", + "ĠW elfare", + "Ġha il", + "Ġ< @", + "tt en", + "Ġcat cher", + "ĠJ azeera", + "Ġvolcan o", + "Ġstabil ize", + "ĠHand ler", + "Ġintens ified", + "ĠAb rams", + "Ġhum iliation", + "p aced", + "60 5", + "ĠCent OS", + "Spe cific", + "Ġhe ed", + "ĠC AM", + "ĠGal ile", + "D ie", + "Ġabol ished", + "ĠThom son", + "ĠTe achers", + "ĠW ass", + "j ong", + "ĠIS BN", + "ĠAll ies", + "sh ake", + "å ·", + "v ict", + "How ard", + "Ġde em", + "Ġexceed ingly", + "ĠSmart stocks", + "ib e", + "Ġdoor way", + "Ġcompet ed", + "ig mat", + "Ġnational ists", + "Ġg room", + "ĠKe en", + "Ġdispos able", + "de cl", + "ĠT olkien", + "ĠSche me", + "Ġb iod", + "Ġav id", + "ĠEl on", + "ag ar", + "ĠT SA", + "R oman", + "Ġartific ially", + "Ġadvis ors", + "X L", + "ĠInf erno", + "36 6", + "Ġted ious", + "ĠPhot ography", + "ĠCar rie", + "Ġtro pe", + "ĠSand ra", + "Ġdec imal", + "Que en", + "ĠGund am", + "ĠO M", + "ote ch", + "N BA", + "Ġ19 32", + "Ġent renched", + "ĠMar ion", + "Ġfr aternity", + "Lab our", + "Hen ry", + "Ġlat itude", + "E ither", + "Ġenh ances", + "ĠPot ential", + "Ġsh ines", + "id ad", + "Ġbread th", + "Ġcapac ities", + "ĠðŁ ĻĤ", + "ĠBron x", + "Ġsex es", + "Ġdifferent iation", + "Ġheavy weight", + "ĠT aj", + "d ra", + "Ġmigr ate", + "Ġexhaust ion", + "ĠR UN", + "els ius", + "ĠCu omo", + "Ġgu itars", + "Ġcl ones", + "ĠSom ew", + "ĠP ry", + "------------ -", + "Ġwarr anted", + "cy cles", + "Ġsalv age", + "Ġdis ks", + "R ANT", + "ĠNGO s", + "ĠMart ian", + "\":[ {\"", + "Ġadd icts", + "oj ure", + "il let", + "Ġamazing ly", + "art ments", + "p ixel", + "ĠGPU s", + "Lay out", + "è £", + "ĠTam il", + "ĠBas il", + "Ġimpart ial", + "ĠSt ructure", + "f ork", + "b ryce", + "Ġr idge", + "ĠHamb urg", + "ri ous", + "Ġbl itz", + "cig arettes", + "Ġcan ned", + "40 2", + "Ġiron ically", + "Ġcompassion ate", + "ĠHaw kins", + ". 
#", + "ĠCat hedral", + "Ġrall ied", + "in ternal", + "Ġqu ota", + "st akes", + "T EXT", + "m om", + "Ġcomple tes", + "Ġ23 8", + "Ġsh rug", + "ãĥ ij", + "ĠN inth", + "Ġrev ise", + "ĠProv ider", + "Ġtre acher", + "Ġqu asi", + "ĠPR ES", + "Ġdep osition", + "Ġconfidential ity", + "iss ors", + "Ġim balance", + "Ġspan ning", + "Ġang ular", + "ĠC ul", + "commun ication", + "ĠNor a", + "ĠGen ius", + "op ter", + "Ġs acked", + "Sp ot", + "Ġfine ly", + "ĠCH R", + "28 2", + "w aves", + "Pal est", + "ĠRo hing", + "N L", + "è ¿", + "Ġsh itty", + "ĠSc alia", + "4 75", + "Pro gress", + "Ġreferen cing", + "Ġclass rooms", + "ab ee", + "Ġs od", + "hes ion", + "70 8", + "ĠZucker berg", + "ĠFin ish", + "ĠScot ia", + "ĠSav ior", + "ĠInstall ation", + "an tha", + "( -", + "Ġ30 2", + "ĠP unk", + "Ġcr ater", + "yout u", + "Ġro ast", + "Ġinflu encing", + "Ġd up", + "ĠJ R", + "ĠG rav", + "Ġstat ure", + "Ġbath rooms", + "A side", + "W iki", + "me an", + "ĠZ ak", + "ĠOn es", + "ĠN ath", + "Ġhyper t", + "Ġcommence ment", + "C ivil", + "Ġmoder ately", + "Ġdistribut ors", + "Ġbreast feeding", + "Ġ9 80", + "ĠS ik", + "ĠC ig", + "ĠAM ER", + "R IP", + "ĠCare er", + "ust ing", + "Ġmess ed", + "Ġe h", + "ĠJ ensen", + "/ $", + "Ġblack mail", + "Ġconvers ions", + "Ġscientific ally", + "Ġmant ra", + "p aying", + "Ġiv ory", + "ĠCour ts", + "OU GH", + "aunt let", + "Ser ial", + "B row", + "ĠH undreds", + "3 23", + "Ġpe e", + "Ġlin ux", + "Ġsub mer", + "ĠPrinc ipal", + "48 5", + "ĠD SL", + "ĠCous ins", + "Ġdoctr ines", + "ĠAthlet ics", + "Ġ3 15", + "ĠK arma", + "Ġatt ent", + "ur ger", + "Ġpresc ribe", + "Ġenc aps", + "ĠC ame", + "Ġsecret ive", + "ĠCr imes", + "d n", + "C lean", + "ĠEgypt ians", + "ĠCar penter", + "Ġ ll", + "H um", + "ĠMil o", + "Ġcapital ists", + "Ġbrief ed", + "T we", + "ĠBas in", + "elve t", + "M os", + "Ġplun ge", + "ĠKa iser", + "ĠFu j", + "ill in", + "Ġsafegu ards", + "Ġo ste", + "ĠOpportun ity", + "ĠM afia", + "ĠCall ing", + "ap a", + "ur ban", + "br ush", + "ill ard", + "c é", + "int elligence", + "ĠL ob", + "ĠDru id", + "Ġsm oother", + "Ġfoot ing", + "Ġmotor ists", + "arc ity", + "Ġmascul inity", + "Ġm ism", + "Ġabdom inal", + "ĠTa vern", + "ĠR oh", + "Ġesc apes", + "s igned", + "Anth ony", + "Ġsacrific ing", + "Ġintim acy", + "Ġan terior", + "ĠK od", + "Ġmot if", + "Ġg raz", + "Ġvisual ization", + "Ġguitar ist", + "ĠTro tsky", + "m agic", + "D ar", + "ĠMor i", + "Ġw ards", + "Ġtoile ts", + "l est", + "Ġtele port", + "ĠSund ays", + "ĠPl at", + "ET S", + "Ġe Sports", + "Pat rick", + "ĠK atherine", + "en ko", + "Ġhas sle", + "ĠM ick", + "gg les", + "Ġh ob", + "aint ain", + "Ġair borne", + "Ġsp ans", + "Ġch ili", + "Ġa perture", + "Ġvolunte ered", + "ĠInc ident", + "ĠF res", + "ĠVeter an", + "augh tered", + "ing o", + "Ġun insured", + "CL OSE", + "Ġf use", + "Ġer otic", + "Ġadvert ise", + "ra ising", + "Text ure", + "Ġatt ends", + "ĠRE AL", + "udd led", + "Ġsm oot", + "Ġ30 5", + "ĠWill is", + "Ġbl ond", + "An alysis", + "ĠV T", + "on ica", + "Ġstrongh old", + "R F", + "N M", + ". 
>>", + "Ġprosper ous", + "Ġbo asted", + "29 2", + "ĠManufact uring", + "PR ESS", + "g ren", + "Ġpharm acy", + "ĠRoc kefeller", + "k ai", + "Ġth umbs", + "ĠH ut", + "Ġmother board", + "Ġguard ians", + "ĠAl ter", + "ll ular", + "Ġsh ack", + "Ġwise ly", + "Ġback bone", + "erv a", + "Ġsu icides", + "ĠMcG regor", + "ij ah", + "E mer", + "ĠB rav", + "Ġdesign ate", + "P OST", + "produ ced", + "Ġcleans ing", + "irl wind", + "ex istent", + "ĠHum ph", + "ĠPay ne", + "Ġv ested", + "Å ¡", + "Ġstring ent", + "ion a", + "Ġuns ub", + "Ġsum med", + "ĠHer cules", + "sub ject", + "ĠR agnar", + "ĠN os", + "Ġcharacter ization", + "Ġsav vy", + "ĠDaw son", + "ĠCas ino", + "Ġf ri", + "ĠBar rier", + "Ġmis information", + "Ġins ulation", + "Ġcorrid ors", + "Ġair planes", + "ĠNo ct", + "ah i", + "Ġ19 16", + "k b", + "arm ac", + "Ġsh un", + "Ġsche ma", + "Ġhorr ified", + "Ġ23 9", + "aund ers", + "N B", + "i ates", + "er ity", + "ĠSh ard", + "Ġr arity", + "Ġgroup ed", + "ĠGh ana", + "again st", + "ĠBi ological", + "ĠA ware", + "ow ell", + "Ï Ħ", + "ĠBe au", + "sh aw", + "H ack", + "ĠJul ius", + "US S", + "ol son", + "aun a", + "c ru", + "ĠMaur ice", + "ĠI k", + "Ġsequ encing", + "Ġradical s", + "Ġ( ?,", + "v irtual", + "Ġany ways", + "Ġreper c", + "Ġhand lers", + "Ġhes itant", + "é ĥ", + "ĠM F", + "ple mentation", + "ass ociated", + "Ġcampaign ed", + "ĠY ue", + "ut ations", + "ĠY oga", + "Ġsim mer", + "Ġro ds", + "Ġmel ody", + "Ġconv oy", + "v ideos", + "Ġscreen ed", + "N eg", + "ochem ical", + "Ġ( ))", + "Ġultr as", + "Ġant ip", + "ĠIsland ers", + "70 4", + "Ġfet ish", + "Ġridic ulously", + "ĠK art", + "Ġmitochond rial", + "Ġinterf ering", + "Build er", + "Ġover fl", + "Ġac ne", + "ĠM ud", + "ĠK err", + "f lex", + "ĠPost al", + "ĠBalt ic", + "47 7", + "ĠPers ons", + "our age", + "H B", + "ĠM use", + "ĠImm ortal", + "ĠDri ving", + "Ġpet itions", + "Ġsubsc ript", + "Ġs orce", + "ĠProcess or", + "ut on", + "S ony", + "Ġph on", + "Ġr aced", + "ĠAnth rop", + "Ġday time", + "ĠEx ercise", + "Add ing", + "Ġeng ages", + "ĠQual comm", + "Ġmir acles", + "Ġmem es", + "ĠDr ink", + "ĠOri oles", + "Ġhair s", + "ĠPol ar", + "ath om", + "Ġsl ippery", + "ĠR emy", + "Ġcar amel", + "ĠY EAR", + "Ġal k", + "I gn", + "a ution", + "ĠMer lin", + "ĠC ran", + "Ġap ologies", + "Ġ4 10", + "Ġout ing", + "ĠMem ories", + "app ointed", + "Ġcount ered", + "u ld", + "pos ing", + "Ġfire wall", + "ĠW ast", + "ĠW et", + "work ed", + "se ller", + "Ġrepe aled", + "ere o", + "ass uming", + "BL IC", + "m ite", + "ĠCEO s", + "ĠChap el", + "ellig ent", + "________________ ________", + "D og", + "Ġw art", + "Ġsubsc riber", + "s ports", + "Ġbe gged", + "ĠM V", + "Ġsem if", + "eth ical", + "Ġpre ach", + "Ġrev ital", + "Ġpun itive", + "Ġshort cuts", + "Ġinstit uted", + "ĠWars aw", + "Ġabdom en", + "ĠK ING", + "Ġsuper intendent", + "Ġf ry", + "ĠGe o", + "T OR", + "Ġcontrad ictions", + "apt ic", + "Ġlandsc apes", + "b ugs", + "Ġcl ust", + "Ġvol ley", + "c ribed", + "Ġt andem", + "Ġrob es", + "WH AT", + "Ġpromot er", + "Ġel oqu", + "review ed", + "ĠD K", + "ĠPl ato", + "Ġf ps", + "T ank", + "ĠDer rick", + "Ġpriorit ize", + "as per", + "ĠHond uras", + "ĠCom pleted", + "ne c", + "Ġm og", + "n ir", + "ĠMay o", + "DE F", + "st all", + "in ness", + "ĠVolks wagen", + "Ġprec aution", + "ĠM ell", + "i ak", + "ist ries", + "Ġ24 8", + "Ġoverl apping", + "Sen ate", + "ĠEnh ance", + "res y", + "rac ial", + "OR TS", + "ĠM ormons", + "Str ong", + "ĠCo ch", + "Mex ico", + "ĠMad uro", + "Ġj ars", + "Ġcan e", + "W ik", + "oll a", + "iff erence", + "Ġphysic ist", + "ĠMag gie", + "Ġ28 
5", + "Ġdep iction", + "ĠMcL aren", + "J u", + "Ġsl ows", + "Ġcommission ers", + "ĠWill ow", + "ĠExpl os", + "hov ah", + "Ġtechn ician", + "Ġhom icides", + "ĠFl av", + "ĠTr uman", + "Ġ100 00", + "u ctor", + "Ġsh ader", + "News letter", + "45 7", + "Ġre ver", + "Ġhard ened", + "Ġwhere abouts", + "Ġrede velop", + "Ġcar bs", + "Ġtra vers", + "Ġsqu irrel", + "Ġfoll ower", + "Ġs ings", + "50 8", + "Ġrabb its", + "emon ium", + "Ġdocument ing", + "Ġmisunder stood", + ") '", + "R ick", + "gg ies", + "Ġprem ie", + "Ġsk ating", + "Ġpass ports", + "Ġf ists", + "aged don", + "H aw", + "AC P", + "0 80", + "ĠThough ts", + "ĠCarl son", + "Ġpriest hood", + "h ua", + "Ġdun geons", + "ĠLo ans", + "Ġant is", + "Ġfamiliar ity", + "ĠS abb", + "op al", + "ĠIn k", + "st rike", + "Ġc ram", + "Ġlegal ized", + "Ġcu isine", + "Ġfib re", + "Tra vel", + "ĠMon ument", + "OD Y", + "eth y", + "Ġinter state", + "ĠP UR", + "em porary", + "ĠArab ian", + "develop ed", + "Ġsadd le", + "Ġg ithub", + "ĠOff er", + "ĠIS P", + "ro let", + "ĠSUP ER", + "ĠDen is", + "Ġmultipl ier", + "Ġstir red", + "Interest ingly", + "Ġcustom ary", + "Ġbill ed", + "he x", + "Ġmultipl ied", + "Ġfl ipping", + "ĠCros by", + "Ġfundament als", + "ia e", + "ĠPlay ed", + "ĠAt om", + "am azon", + "ĠFl am", + "ee z", + "activ ated", + "Ġtables poon", + "Ġliberal ism", + "ĠPal in", + "ĠP atel", + "N um", + "ĠT AM", + "Ġs urn", + "ĠRel oaded", + "Ġco ined", + "\" ],", + "ĠCl ash", + "ĠAg u", + "Ġprag matic", + "ĠActiv ate", + "Ġ8 02", + "Ġtrail ers", + "Ġsil hou", + "Ġprob es", + "Ġcirc us", + "ĠB ain", + "ĠLind say", + "ĠAb bey", + "Del ivery", + "Ġconcess ion", + "Ġgast ro", + "ĠSpr ite", + "Ä Ł", + "and el", + "Ġg imm", + "Ġaut obi", + "ĠT urtle", + "Ġwonder fully", + "ĠHar am", + "ĠWorld wide", + "ĠHand le", + "Ġtheor ists", + "Ġsle ek", + "ĠZh u", + "ograph ically", + "EG A", + "ĠOwn ers", + "ath s", + "ĠAntar ctic", + "n atal", + "=\" \"", + "fl ags", + "`` ``", + "Ġs ul", + "K h", + "Ġpot assium", + "Ġlinem an", + "Ġcere al", + "ĠSe asons", + "Ġ20 22", + "Ġmat hematic", + "Ġastron omers", + "prof essional", + "Ġf ares", + "cknow led", + "Ġch i", + "Ġyoung sters", + "Ġmistaken ly", + "Ġhem isphere", + "ĠDiv inity", + "r one", + "Ġ\" ,", + "r ings", + "Ġattract s", + "v ana", + "å ¹", + "C AP", + "Ġplay list", + "Ġpor ch", + "ãģ £", + "Ġincorpor ates", + "Ġso ak", + "Ġassert ing", + "ĠTerror ism", + "ĠP ablo", + "J a", + "ces ter", + "Ġfear ing", + "ĠPr ayer", + "Ġescal ated", + "G W", + "Ġro be", + "ĠBright on", + "ac ists", + "ĠSym phony", + "ĠDwar f", + "ĠPar ade", + "ĠLe go", + "Ġinex pl", + "Ġl ords", + "le af", + "RA G", + "l iber", + "Ġcig ars", + "ĠJe hovah", + "60 6", + "WIND OWS", + "ĠLiber ia", + "eb us", + "He avy", + "Ġl ubric", + "ĠR W", + "angu ages", + "Ġnarrow ed", + "com puter", + "ĠE mber", + "Ġmurder ing", + "Ġdown stream", + "ĠT uls", + "ĠT ables", + "Top ic", + "ĠAcc uracy", + "= /", + "l ost", + "ĠRe i", + "Ġprogress es", + "b ear", + "Ġestablish ments", + "Just in", + "ĠPe ach", + "ĠG omez", + "å ¿", + "ĠTri angle", + "Id ent", + "ĠH ive", + "Res ources", + "Ġmix es", + "ĠAss uming", + "M u", + "Ġhyp oc", + "Ġs ane", + "ĠW an", + "id ious", + "Su ccess", + "Ġ io", + "Ang el", + "Ġdanger ously", + "ĠCreat ure", + "W ORK", + ": [", + "ĠKat rina", + "List ener", + "M iller", + "ĠId lib", + "h ang", + "Ġcircum vent", + "h ref", + "Ġcel estial", + "ĠWe eks", + "ĠP ug", + "ĠDal ton", + "Ġsubpoen a", + "uk u", + "Ġpers isted", + "pe i", + "old ing", + "ĠDoc uments", + "ĠH ast", + "ĠC ENT", + "Ġprim er", + "Ġsyn onymous", + "Ġn ib", + "om 
bs", + "Ġnot ation", + "ĠD ish", + "ĠAt mosp", + "Ġforb id", + "ĠAN G", + "pat tern", + "l os", + "Ġproject iles", + "b rown", + ".\" ,", + "ĠVen om", + "Ġfierce ly", + "ub lished", + "ĠU ran", + "ĠNic arag", + "4 10", + "ĠC AL", + "OT OS", + "ĠMir acle", + "ĠEn chant", + "Ġguard ing", + "app end", + "Att ach", + "Ġlevel ed", + "Ġcond oms", + "ih ilation", + "64 9", + "Ġnight mares", + "ĠTHE Y", + "ĠST ART", + "ĠK inn", + "Ġroomm ate", + "Ġhy giene", + "o pping", + "J ob", + "Ġl vl", + "ĠV ER", + "ĠKe eping", + "ab etic", + "Ġformat ting", + "eral a", + "Ġrev isions", + "Ġres urg", + "T el", + "ĠGood man", + "35 3", + "p od", + "Ġind isp", + "ĠTrans lation", + "Ġg own", + "ĠM und", + "Ġc is", + "Ġby stand", + "col lect", + "ĠPun jab", + "act ively", + "ĠG amb", + "te ll", + "Ġimport ing", + "g encies", + "Ġloc om", + "ĠBr ill", + "H oly", + "ĠBer ger", + "Ġshow down", + "Ġrespond ers", + "IL Y", + "Ġt akedown", + "le ted", + "Ġmat tered", + "Ġpredict ive", + "Ġover lay", + "G PU", + "ĠV ick", + "Ġconvey ed", + "T ab", + "pe er", + "Sc an", + "Ġdefensive ly", + "v ae", + "Ġappro ving", + "Ġt iers", + "ĠV ia", + "quer ade", + "ĠSaud is", + "Ġdemol ished", + "ĠProp he", + "Ġmon o", + "Ġhospital ity", + "H AM", + "ĠAri el", + "M OD", + "ĠTor ah", + "Ġbl ah", + "ĠBel arus", + "erent ial", + "ĠT uc", + "Ġbank er", + "39 7", + "Ġmosqu it", + "ĠScient ist", + "ĠMus ical", + "Ġh ust", + "Sh ift", + "Ġtor ment", + "Ġstand off", + "E duc", + "ĠF og", + "Ġampl ifier", + "Sh ape", + "Inst ance", + "ĠCrit ics", + "Ġda emon", + "H ouston", + "Ġmatt ress", + "ĠID F", + "Ġobsc ene", + "ĠA mer", + "hett i", + "Ġcomp iling", + "35 2", + "vere tt", + "ĠRed uction", + "ist ration", + "ĠBl essed", + "ĠB achelor", + "3 16", + "Ġpr ank", + "ĠVul can", + "dd ing", + "Ġm ourning", + "ĠQu int", + "ĠBl aster", + "test ing", + "Ġsed iment", + ">> >", + "ĠE ternity", + "ĠWH ERE", + "ĠM aze", + "Ġreact ing", + "ĠAl v", + "oms day", + "ĠC RA", + "Ġtransl ator", + "Ġbog us", + "at u", + "We bsite", + "oll s", + "Ġbapt ism", + "Ġs ibling", + "ĠAut umn", + "ve z", + "ãģ® é", + "gu ards", + "Ge org", + "assad ors", + "ĠFre ud", + "Ġcontin ents", + "ĠReg istry", + "Bern ie", + "ĸļ 士", + "Ġtoler ant", + "ĠU W", + "Ġhor ribly", + "99 5", + "ĠMID I", + "Ġimpat ient", + "oc ado", + "er i", + "ĠWor st", + "ĠNor ris", + "ĠTalk ing", + "Ġdef ends", + "ens able", + "Ġ20 21", + "Ġanat omy", + "L ew", + "Ġdraw er", + "ĠCan berra", + "Ġpatri otic", + "é¾įå ĸļ士", + "ĠAv g", + "AR M", + "Ġundis closed", + "Ġfare well", + "45 9", + "b able", + "ĠAll ison", + "OL OG", + "Ġcon co", + "t ight", + "ĠAC PI", + "ĠM ines", + "l ich", + "ĠâĶ ľ", + "represent ed", + "200 000", + "Ġenthusi ast", + "OT S", + "b il", + "ĠIng redients", + "Ġinvent or", + "ĠMy SQL", + "³³ Âł", + "ĠAB OUT", + "with in", + "Ġm k", + "B ul", + "ĠF ake", + "Ġdracon ian", + "W a", + "hel m", + "ĠTer ran", + "erv ille", + "Ġcommon place", + "SI ZE", + "Ġ\" <", + "re place", + "ograph s", + "ĠSE LECT", + "inc ible", + "ĠMost ly", + "ĠShe ffield", + "ĠID E", + "ugg le", + "Ġcit ations", + "h urst", + "ĠUn ix", + "Ġunle ash", + "ĠP iper", + "ĠN ano", + "Ġsucc umb", + "Ġreluct ance", + "Ġ25 00", + "ĠMer chant", + "Ġwire t", + "Ġcomb os", + "ĠBirth day", + "Ġchar coal", + "ĠU PS", + "ĠFair fax", + "Ġdrive way", + "ĠT ek", + "ĠP itch", + "ove re", + "Ġtechn icians", + "ĠAct ual", + "fl ation", + "ĠF iscal", + "ĠEm pty", + "an amo", + "Ġmag nesium", + "Ġsl ut", + "Ġgrow ers", + "Invest igators", + "( ):", + "ĠS atellite", + "ĠKe ynes", + "miss ive", + "l ane", + "Ġb orough", + "3 
44", + "ĠTE AM", + "ĠBet hesda", + "C V", + "h ower", + "ĠR AD", + "Ġch ant", + "ĠR iy", + "Ġcompos itions", + "Ġmild ly", + "Ġmedd ling", + "Ġag ility", + "ane ers", + "5 01", + "Ġsyn th", + "ling er", + "29 1", + "Ġex claimed", + "Part y", + "Ġcont amin", + "ĠMan or", + "ĠResp ond", + "Ġpra ising", + "Ġman ners", + "fle et", + "Sum mer", + "ĠLy nd", + "ĠDef initely", + "gr im", + "Ġbow ling", + "st ri", + "ç Ľ", + "y nt", + "Ġmand ates", + "D IV", + "Ġreconc ile", + "view s", + "ĠDam on", + "vet te", + "F lo", + "ĠGreat est", + "il on", + "ic ia", + "Ġportray al", + "Ġcush ion", + "50 4", + "19 79", + "oss al", + "App lic", + "sc ription", + "Ġmit igation", + "AT S", + "p ac", + "Ġer ased", + "Ġdefic iencies", + "ĠHolland e", + "ĠX u", + "Ġb red", + "Ġpregn ancies", + "f emin", + "Ġem ph", + "Ġpl anners", + "Ġout per", + "utter ing", + "Ġperpet rator", + "Ġm otto", + "ĠEll ison", + "ĠNE VER", + "Ġadmitted ly", + "AR I", + "ĠAzerbai jan", + "Ġmill isec", + "Ġcombust ion", + "ĠBott le", + "ĠL und", + "ĠP s", + "ĠD ress", + "Ġfabric ated", + "Ġbat tered", + "Ġs idel", + "ĠNot ting", + "Fore ign", + "ĠJer ome", + "0 20", + "ĠAr bit", + "Ġkn ots", + "ĠR IGHT", + "M oving", + "ãģ Ļ", + "Ġsur geries", + "Ġcour thouse", + "Ġm astered", + "Ġhover ing", + "ĠBr an", + "ĠAl ison", + "Ġsaf est", + "m ilitary", + "Ġbull ied", + "Ġbar rage", + "Read er", + "ES E", + "ĠGe ographic", + "T ools", + "3 14", + "ĠGe ek", + "ro th", + "gl ers", + "ĠF IN", + "Ï ģ", + "ĠA ston", + "al tern", + "48 8", + "Ġveter in", + "G amer", + "Ġint el", + "ren ches", + "Sh ield", + "Ġam nesty", + "ĠB har", + "Ġp iled", + "Ġhonor able", + "ĠInst itutes", + "Ġso aked", + "Ġcom a", + "ĠE FF", + "34 1", + "by tes", + "ĠG mail", + "le in", + "ĠCanad iens", + "m aterial", + "I l", + "Ġinstruct ors", + "ĠK Y", + "Ġconce ive", + "ub b", + "ĠP ossible", + "Ġeas ing", + "ĠChrist ina", + "Ġcar ic", + "ĠHD R", + "R OM", + "Ġsho vel", + "de lete", + "Ġp uff", + "ĠCh anging", + "Ġseam lessly", + "Att ribute", + "Ġacqu isitions", + "ak ery", + "ĠE F", + "Ġaut istic", + "ĠT akes", + "ĠPow der", + "ĠSt ir", + "5 10", + "ĠBub ble", + "sett ings", + "ĠF owler", + "Ġmust ard", + "Ġmore over", + "Ġcopyright ed", + "ĠLED s", + "15 00", + "æ ī", + "ĠH IS", + "en f", + "Ġcust od", + "ĠH uck", + "G i", + "Ġim g", + "An swer", + "C t", + "j ay", + "ĠInf rastructure", + "Ġfeder ally", + "L oc", + "Ġmicro bes", + "Ġover run", + "dd s", + "ot ent", + "adi ator", + ">>>> >>>>", + "Ġtorn ado", + "Ġadj ud", + "Ġintrig ued", + "Ġs i", + "ĠRevel ation", + "pro gress", + "Ġburgl ary", + "ĠSai yan", + "ĠK athy", + "Ġser pent", + "ĠAndre as", + "Ġcomp el", + "ess ler", + "ĠPl astic", + "ĠAd vent", + "ĠPos itive", + "ĠQ t", + "ĠHind us", + "reg istered", + "ular ity", + "Ġrighteous ness", + "Ġdemon ic", + "u itive", + "ĠB DS", + "ĠGre gg", + "c ia", + "ĠCrus ade", + "ĠSina i", + "W ARE", + "+ (", + "Ġme ll", + "Ġder ail", + "y ards", + "A st", + "Ġnotice ably", + "ĠO ber", + "R am", + "Ġun noticed", + "Ġse q", + "av age", + "T s", + "Ġ6 40", + "Ġconced e", + "Ġ] )", + "F ill", + "Ġcapt ivity", + "ĠImprove ment", + "ĠCrus ader", + "ara oh", + "M AP", + "æ Ĺ", + "Ġstr ide", + "al ways", + "F ly", + "N it", + "Ġal gae", + "ĠCook ing", + "ĠDo ors", + "Mal ley", + "Ġpolic emen", + "ãģ į", + "Ġastron aut", + "access ible", + "49 5", + "ĠR AW", + "cl iffe", + "udic rous", + "Ġdep ended", + "al ach", + "Ġvent ures", + "ra ke", + "Ġt its", + "ĠH ou", + "Ġcond om", + "ormon al", + "Ġind ent", + "Ġupload ing", + "Foot note", + "Import ant", + "Ġ27 1", + "Ġmind ful", + 
"Ġcont ends", + "C ra", + "Ġcal ibr", + "ĠO ECD", + "plug in", + "F at", + "ĠIS S", + "ĠDynam ics", + "ans en", + "68 6", + "' ),", + "Ġsp rite", + "Ġhand held", + "ĠH ipp", + "=~ =~", + "Tr ust", + "Ġsem antics", + "ĠBund es", + "ĠRen o", + "ĠLiter ature", + "s ense", + "G ary", + "ĠA eg", + "ĠTr in", + "EE K", + "Ġcler ic", + "ĠSS H", + "Ġch rist", + "Ġinv ading", + "ib u", + "Ġen um", + "aur a", + "Ġal lege", + "ĠInc redible", + "B BC", + "Ġth ru", + "Ġsa iled", + "Ġem ulate", + "Ġin security", + "Ġc rou", + "Ġaccommod ations", + "Ġincompet ent", + "Ġsl ips", + "ĠEarth qu", + "s ama", + "IL LE", + "Ġi Phones", + "as aki", + "Ġby e", + "Ġar d", + "Ġext ras", + "Ġsl aughtered", + "Ġcrowd funding", + "res so", + "Ġfil ib", + "ĠER ROR", + "ĠT LS", + "e gg", + "ĠIt al", + "Ġen list", + "ĠCatal onia", + "ĠSc ots", + "Ġser geant", + "Ġdiss olve", + "N H", + "Ġstand ings", + "ri que", + "I Q", + "Ġbenef iciary", + "Ġaqu arium", + "You Tube", + "ĠPower Shell", + "Ġbright est", + "ĠWar rant", + "S old", + "Writ ing", + "Ġbegin nings", + "ĠRes erved", + "ĠLatin os", + "head ing", + "Ġ4 40", + "Ġrooft op", + "AT ING", + "Ġ3 90", + "VP N", + "G s", + "k ernel", + "turn ed", + "Ġprefer able", + "Ġturn overs", + "ĠH els", + "S a", + "ĠShin ji", + "ve h", + "ĠMOD ULE", + "V iol", + "Ġex iting", + "Ġj ab", + "ĠVan illa", + "Ġac ron", + "ĠG ap", + "ber n", + "A k", + "ĠMc Gu", + "Ġend lessly", + "ĠFar age", + "ĠNo el", + "V a", + "M K", + "Ġbr ute", + "ĠK ru", + "ĠES V", + "ĠOl ivia", + "âĢ ł", + "ĠK af", + "Ġtrust ing", + "Ġh ots", + "3 24", + "Ġmal aria", + "Ġj son", + "Ġp ounding", + "ort ment", + "Count ry", + "Ġpostp oned", + "Ġunequ iv", + "? ),", + "ĠRo oney", + "udd ing", + "ĠLe ap", + "ur rence", + "sh apeshifter", + "ĠH AS", + "os ate", + "Ġca vern", + "Ġconserv atism", + "ĠB AD", + "Ġmile age", + "Ġarrest ing", + "V aults", + "Ġmix er", + "Dem ocratic", + "ĠB enson", + "Ġauth ored", + "8 000", + "Ġpro active", + "ĠSpirit ual", + "t re", + "Ġincarcer ated", + "ĠS ort", + "Ġpe aked", + "Ġwield ing", + "re ciation", + "×Ļ ×", + "P atch", + "ĠEm my", + "Ġex qu", + "tt o", + "ĠRat io", + "ĠP icks", + "ĠG ry", + "ph ant", + "Ġf ret", + "Ġeth n", + "Ġarch ived", + "% -", + "c ases", + "ĠBl aze", + "Ġim b", + "c v", + "y ss", + "im ony", + "Ġcount down", + "Ġaw akening", + "ĠTunis ia", + "ĠRe fer", + "ĠM J", + "Ġun natural", + "ĠCar negie", + "iz en", + "ĠN uggets", + "he ss", + "Ġev ils", + "64 7", + "Ġintrodu ctory", + "l oving", + "ĠMcM ahon", + "Ġambig uity", + "L abel", + "ĠAlm ighty", + "Ġcolor ing", + "ĠCl aus", + "set ting", + "N ULL", + "ĠF avorite", + "ĠS IG", + "> (", + "ĠSh iva", + "ĠMay er", + "Ġstorm ed", + "ĠCo verage", + "we apons", + "igh am", + "Ġun answered", + "Ġle ve", + "Ġc oy", + "c as", + "b ags", + "as ured", + "Se attle", + "ĠSant orum", + "ser ious", + "Ġcourage ous", + "ĠS oup", + "Ġconfisc ated", + "Ġ// /", + "Ġuncon ventional", + "Ġmom s", + "ĠRohing ya", + "ĠOrche stra", + "ĠPot ion", + "Ġdisc redit", + "ĠF IL", + "f ixed", + "ĠDe er", + "do i", + "ĠDim ension", + "Ġbureaucr ats", + "et een", + "Ġaction Group", + "oh m", + "Ġb umps", + "ĠUt ility", + "Ġsubmar ines", + "ren heit", + "re search", + "ĠShap iro", + "Ġsket ches", + "Ġde ceptive", + "ĠV il", + "es ame", + "ĠEss entially", + "Ġramp age", + "isk y", + "Ġmut tered", + "th ritis", + "Ġ23 6", + "f et", + "b ars", + "Ġpup il", + "ĠTh ou", + "o S", + "s ong", + "Ġfract ured", + "Ġre vert", + "pict ure", + "Ġcrit erion", + "us her", + "Ġreperc ussions", + "ĠV intage", + "ĠSuper intendent", + "Offic ers", + "Ġflag 
ged", + "Ġbl ames", + "Ġin verse", + "ograp hers", + "Ġmakes hift", + "Ġdev oid", + "Ġfoss ils", + "ĠArist otle", + "ĠFund s", + "Ġde pleted", + "ĠFl u", + "ĠY uan", + "Ġw oes", + "Ġlip id", + "Ġsit u", + "requ isites", + "Ġfurn ish", + "ĠSam ar", + "Ġshame ful", + "Ġadverse ly", + "Ġad ept", + "Ġrem orse", + "Ġmurder ous", + "uck les", + "ĠE SL", + "Ġ3 14", + "s ent", + "Ġred ef", + "ĠC ache", + "ĠP urs", + "ig ans", + "Ġ4 60", + "Ġpres criptions", + "Ġf res", + "F uck", + "ocr ates", + "Tw enty", + "ĠWe ird", + "ĠT oggle", + "ĠC alled", + "itiz ens", + "Ġp oultry", + "Ġharvest ing", + "ãĤ¦ ãĤ¹", + "Bott om", + "Ġcaution ed", + "t n", + "39 6", + "ĠNik ki", + "Ġeval uations", + "Ġharass ing", + "Ġbind ings", + "ĠMon etary", + "Ġhit ters", + "Ġadvers ary", + "un ts", + "Ġset back", + "Ġenc rypt", + "ĠC ait", + "Ġl ows", + "eng es", + "ĠN orn", + "Ġbul bs", + "Ġbott led", + "ĠVoy ager", + "3 17", + "Ġsp heres", + "p olitics", + "Ġsubt ract", + "Ġsens ations", + "Ġapp alling", + "Ġ3 16", + "Ġenvironment ally", + "ĠST EM", + "Ġpub lishes", + "5 60", + "Ġdilig ence", + "48 4", + "Ġadv ises", + "Ġpet rol", + "Ġimag ining", + "Ġpatrol s", + "ĠInt eger", + "ĠAs hes", + "act us", + "ĠRad iant", + "ĠL T", + "it ability", + "ht aking", + "Set ting", + "Ġnu anced", + "ĠRe ef", + "ĠDevelop ers", + "N i", + "pie ces", + "99 0", + "Lic ense", + "Ġlow ers", + "ĠOtt oman", + "3 27", + "oo o", + "Ġqu itting", + "mark ets", + "Beh ind", + "Ġbas in", + "Ġdoc s", + "an ie", + "fl ash", + "ct l", + "Ġcivil ized", + "ĠFuk ushima", + "\"] ,\"", + "ĠK S", + "ĠHonest ly", + "ar at", + "Ġconstruct s", + "ĠL ans", + "ĠD ire", + "ĠLI KE", + "ĠTrou ble", + "Ġwith holding", + "ĠOb livion", + "Ġsan ity", + "any a", + "Con st", + "Ġgro cer", + "ĠC elsius", + "Ġrecount ed", + "ĠW ife", + "B order", + "ate red", + "h appy", + "Ġspo iler", + "Ġlog ically", + "H all", + "Ġsucceed ing", + "Ġpoly morph", + "Ġax es", + "ĠShot gun", + "ĠS lim", + "ĠPrin ciples", + "ĠL eth", + "art a", + "Ġsc or", + "Sc reenshot", + "Ġrelax ation", + "#$ #$", + "Ġdeter rent", + "idd y", + "Ġpower less", + "Ġles bians", + "Ġch ords", + "ĠEd ited", + "se lected", + "Ġseparat ists", + "000 2", + "Ġair space", + "Ġturn around", + "Ġc unning", + "P ATH", + "P oly", + "Ġbomb ed", + "Ġt ion", + "x s", + "Ġwith hold", + "Ġw aged", + "ĠLiber ties", + "Fl ag", + "Ġcomfort ing", + "45 4", + "ĠI ris", + "are rs", + "Ġr ag", + "Ġrel ocated", + "ĠGu arant", + "Ġstrateg ically", + "Ġgam ma", + "uber ty", + "ĠLock heed", + "g res", + "Ġgr illed", + "ĠLow e", + "st ats", + "ĠR ocks", + "Ġsens ing", + "Ġrent ing", + "ĠGe ological", + "ا Ø", + "ot rop", + "Ġse w", + "Ġimproper ly", + "48 6", + "Ġâĸ ł", + "Ġstar ving", + "ĠB j", + "Disc ussion", + "3 28", + "ĠCom bo", + "ĠFix es", + "N AT", + "Ġstri ving", + "th ora", + "Ġharvest ed", + "ĠP ing", + "Ġplay ful", + "Ġaven ues", + "Ġoccup ational", + "Ġw akes", + "ĠCou rier", + "Ġdrum mer", + "ĠBrow ser", + "ĠH outh", + "it u", + "Ġapp arel", + "p aste", + "Ġhun ted", + "ĠSecond ly", + "l ain", + "X Y", + "ĠP IN", + "ic ons", + "Ġcock tails", + "Ġs izable", + "Ġhurd les", + "est inal", + "ĠRecre ation", + "Ġe co", + "64 8", + "ĠD ied", + "m int", + "Ġfinger prints", + "Ġdis pose", + "ĠBos nia", + "ts y", + "22 00", + "Ġins pected", + "ĠF ou", + "Ġf uss", + "Ġamb ush", + "ĠR ak", + "Ġmanif ested", + "Pro secut", + "Ġsuff ice", + "ren ces", + "Ġcompens ated", + "ĠC yrus", + "Ġgen us", + "ĠWolver ine", + "ĠTrend s", + "Ġh ikes", + "ĠSe en", + "Ġen rol", + "C old", + "Ġpol itely", + "ĠSl av", + "ĠRu pert", + "Ġey 
ewitness", + "ĠAl to", + "Ġun comp", + "Ġposter ior", + "M ust", + "ĠHer z", + "Ġprogress ively", + "Ġ23 4", + "Ġind ifference", + "ĠCunning ham", + "Ġacadem ia", + "Ġse wer", + "Ġast ounding", + "ĠA ES", + "r ather", + "Ġeld est", + "Ġclim bs", + "ĠAdd s", + "Ġout cry", + "Ġcont ag", + "ĠH ouses", + "Ġpe pt", + "ĠMel ania", + "interest ed", + "ĠU CH", + "ĠR oots", + "ĠHub bard", + "ĠT BD", + "ĠRoman ian", + "fil ename", + "St one", + "ĠIm pl", + "Ġchromos ome", + "C le", + "d x", + "Ġscram bled", + "ĠP t", + "Ġ24 2", + "OP LE", + "Ġtremend ously", + "St reet", + "Ġcra ving", + "Ġbund led", + "ĠR G", + "p ipe", + "Ġinj uring", + "Ġarc ane", + "Part icip", + "ĠHero ic", + "st y", + "Ġto pping", + "ĠTemp est", + "rent ices", + "b h", + "Ġpar anoia", + "ĠUnic ode", + "Ġegreg ious", + "Ġ\\ '", + "ĠOsw ald", + "Ġgra vel", + "ĠSim psons", + "Ġbl and", + "ĠGuant anamo", + "Writ er", + "lin ers", + "ĠD ice", + "J C", + "Ġpar ity", + "Ġs ided", + "Ġ23 7", + "ĠPyr rha", + "at ters", + "d k", + "F ine", + "comp an", + "Ġform ulated", + "ĠId ol", + "il ers", + "hem oth", + "ĠF av", + "Ġintr usion", + "Ġcar rots", + "ĠL ayer", + "ĠH acker", + "Ġ ----------------", + "Ġmoder ation", + "é ģ", + "oc oc", + "Ġcharacter ize", + "ĠTe resa", + "Ġsocio economic", + "Ġper k", + "ĠParticip ation", + "tr aining", + "ĠPaul o", + "ph ys", + "Ġtrust worthy", + "Ġembod ied", + "ĠMer ch", + "c urrency", + "ĠPrior ity", + "Ġte asing", + "Ġabsor bing", + "Ġunf inished", + "ĠCompar ison", + "Ġdis ple", + "writ ers", + "Ġprofess ions", + "ĠPengu in", + "Ġang rily", + "ĠL INK", + "68 8", + "ĠCor respond", + "Ġprev ailed", + "Ġcart el", + "l p", + "as ms", + "ĠRed emption", + "ĠIslam ists", + "effect s", + "d ose", + "ĠL atter", + "ĠHal ifax", + "Ġv as", + "ĠTop ics", + "ĠN amed", + "advert ising", + "zz a", + "IC ES", + "Ġret arded", + "ach able", + "ĠPupp et", + "ĠItem Level", + "Ġret ract", + "Ġident ifiable", + "A aron", + "ĠB uster", + "s ol", + "hel le", + "as semb", + "H ope", + "r anged", + "B a", + "ĠP urch", + "é Ģ", + "ĠSir i", + "Ġarri vals", + "Ġ19 12", + "Ġshort ened", + "Ġ3 12", + "Ġdiscrep ancy", + "ĠTem perature", + "ĠWal ton", + "Ġkind erg", + "p olit", + "Ġrem ix", + "Ġconnect ors", + "ãĥĺ ãĥ©", + "ĠKazakh stan", + "dom inated", + "Ġsu gars", + "im ble", + "ĠPan ic", + "ĠDem and", + "ĠCol ony", + "on en", + "ĠM ER", + "7 75", + "ur ia", + "aza ar", + "ĠDeg ree", + "P ri", + "Ġsun shine", + "Ġ25 1", + "Ġpsychedel ic", + "Ġdigit ally", + "ĠBra un", + "Ġsh immer", + "Ġsh ave", + "ĠTel esc", + "ĠAst ral", + "ĠVenezuel an", + "ĠO G", + "Ġc rawling", + "Int eg", + "ĠFe ather", + "Ġunfold ing", + "Ġappropri ation", + "Ġè£ı è", + "ĠMob ility", + "ĠN ey", + "- .", + "b ilt", + "L IN", + "ĠT ube", + "ĠCon versely", + "Ġkey boards", + "ĠC ao", + "Ġover th", + "Ġla ure", + ">> \\", + "ĠV iper", + "ach a", + "Off set", + "ĠR aleigh", + "ĠJ ae", + "J ordan", + "j p", + "Ġtotal itarian", + "Connect or", + "Ġobserv es", + "ĠSpart an", + "ĠIm mediately", + "ĠSc al", + "C ool", + "Ġt aps", + "Ġro ar", + "P ast", + "Ġch ars", + "ĠB ender", + "ĠShe ldon", + "Ġpain ter", + "Ġbe acon", + "ĠCreat ures", + "Ġdownt urn", + "Ġh inder", + "ĠAnd romeda", + "à Ľ", + "cc oli", + "ĠF itness", + "et rical", + "Ġutil izes", + "Ġsen ate", + "Ġen semble", + "Ġche ers", + "T W", + "Ġaff luent", + "k il", + "ry lic", + "ord ering", + "Com puter", + "Ġgru esome", + "ost ics", + "ĠUb isoft", + "ĠKel ley", + "Ġw rench", + "Ġbourgeois ie", + "IB LE", + "ĠPrest on", + "w orn", + "ar ist", + "reat ing", + "Ġst ained", + "ar ine", + "Ġsl ime", + 
"EN N", + "Ġche sts", + "Ġground water", + "ann ot", + "ĠTr ay", + "ĠLoc ke", + "ĠC TR", + "Ġd udes", + "ĠEx ternal", + "ĠDec oder", + "Ġpar amed", + "ĠMed line", + "80 9", + "ĠD inner", + "rup al", + "g z", + "ĠG um", + "ĠDem o", + "j ee", + "Ġd h", + "ber man", + "arch s", + "Ġen qu", + "ĠEp stein", + "Ġdevast ation", + "Ġfriends hips", + "ĠAr d", + "Ġ23 1", + "ĠRub in", + "ĠDist ance", + "Ġsp urred", + "Ġd ossier", + "Ġover looking", + "\\\\\\\\\\\\\\\\ \\\\\\\\\\\\\\\\", + "Fore st", + "ĠCom es", + "\\ \",", + "ĠIran ians", + "Ġf ixtures", + "L aughs", + "Ġcur ry", + "ĠKing ston", + "Ġsqu ash", + "Ġcat alogue", + "Ġabnormal ities", + "Ġdigest ive", + ".... .....", + "Ġsubord inate", + "og ly", + "Ġ24 9", + "M iddle", + "Ġmass ac", + "Ġburg ers", + "Ġdown stairs", + "Ġ19 31", + "39 4", + "ĠV G", + "Ġl asers", + "ĠS ikh", + "ĠAlex a", + "der ived", + "Ġcycl ist", + "ãģ® éŃĶ", + "onel iness", + "!!!! !!!!", + "Ġbuff s", + "leg ate", + "Ġrap ing", + "Ġrecomm ending", + "ro red", + "Ġmult icultural", + "un ique", + "Ġbusiness men", + "Ġune asy", + "ĠM AP", + "Ġdisp ersed", + "cipl ine", + "J ess", + "ĠK erala", + "å §", + "Ġabst raction", + "Sur v", + "U h", + "Ġprin ters", + "ij a", + "ow der", + "Ġanalog ous", + "ĠA SP", + "af er", + "Ġunfold ed", + "Ġlevel ing", + "Ġbre ached", + "ĠH earing", + "Ġn at", + "Ġtransl ating", + "crit ical", + "Ġant agonist", + "ĠYes terday", + "Ġfuzz y", + "w ash", + "m ere", + "Ġbe wild", + "ĠM ae", + "V irgin", + "ph rase", + "Ġsign aled", + "ĠH IGH", + "Ġprot ester", + "Ġgar ner", + "unk nown", + "Ġk ay", + "Ġabduct ed", + "Ġst alking", + "am n", + "Ġdes erving", + "ĠR iv", + "ĠJ orge", + "Ġscratch ing", + "ĠS aving", + "ip ing", + "Ġte ase", + "Ġmission ary", + "ĠMor row", + "T IME", + "P resent", + "Ġchem otherapy", + "tern ess", + "ĠH omes", + "ĠP urdue", + "Ġst aunch", + "ĠWhit ney", + "ĠTH ERE", + "Î ¼", + "iat us", + "ĠErn est", + "ĠDe ploy", + "Ġcove ted", + "F ML", + "ĠDial ogue", + "Ġex ited", + "f ruit", + "Ġner d", + "\":\" \",\"", + "Ġv ivo", + "ru ly", + "4 60", + "ĠAm en", + "rehens ible", + "Ġâ ĺ", + "D IR", + "Ġad herence", + "Ġche w", + "ĠCo ke", + "ĠSerge i", + "dig ital", + "ĠNe ck", + "g ently", + "enth al", + "/ )", + "Ġwe ary", + "Ġgu ise", + "ĠConc ord", + "ĠOn ion", + "at cher", + "Ġb inge", + "ĠDirect ive", + "Ġman ned", + "ans k", + "Ġill usions", + "Ġbillion aires", + "38 3", + "oly n", + "odynam ic", + "ĠWhe at", + "ĠA lic", + "Ġcol oured", + "ĠN AFTA", + "ab o", + "Ġmac ros", + "ind ependent", + "s weet", + "Ġsp ac", + "ĠK abul", + "Ġ Ä", + "em e", + "Ġdict ated", + "Ġsh outs", + "= {", + "Ġr ipping", + "ĠSh ay", + "ĠCr icket", + "direct ed", + "Ġanalys ed", + "ĠWAR RANT", + "ag ons", + "ĠBlaz ers", + "Ġche ered", + "Ġar ithmetic", + "ĠTan z", + "37 3", + "ĠFl ags", + "Ġ29 5", + "Ġw itches", + "ĠIn cluded", + "ĠG ained", + "ĠBl ades", + "G am", + "ĠSam antha", + "ĠAtl antis", + "ĠPr att", + "Ġspo iled", + "ĠI B", + "ĠRam irez", + "Pro bably", + "re ro", + "ĠN g", + "ĠWar lock", + "t p", + "Ġover he", + "Ġadministr ations", + "Ġt int", + "Ġreg iment", + "Ġpist ols", + "Ġblank ets", + "Ġep ist", + "Ġbowl s", + "Ġhydra ulic", + "Ġde an", + "Ġj ung", + "Ġasc end", + "70 5", + "ĠSant iago", + "à ®", + "Ġun avoid", + "ĠSh aman", + "re b", + "Ġstem ming", + "99 8", + "ĠM G", + "st icks", + "esthes ia", + "ER O", + "Ġmor bid", + "ĠGr ill", + "ĠP oe", + "any l", + "Ġdele ting", + "ĠSurve illance", + "Ġdirect ives", + "Ġiter ations", + "ĠR ox", + "ĠMil ky", + "F ather", + "Ġpat ented", + "44 7", + "Ġprec ursor", + "Ġm aiden", + "ĠP 
hen", + "ĠVe gan", + "ĠPat ent", + "K elly", + "Redd itor", + "Ġn ods", + "Ġvent ilation", + "ĠSchwar z", + "Ġw izards", + "Ġomin ous", + "ĠHe ads", + "ĠB G", + "Ġl umber", + "ĠSp iel", + "Ġis Enabled", + "Ġancest ral", + "ĠSh ips", + "Ġwrest ler", + "ph i", + "Ġy uan", + "ĠRebell ion", + "Ġice berg", + "Ġmag ically", + "Ġdivers ion", + "ar ro", + "yth m", + "ĠR iders", + "ĠRob bie", + "ĠK ara", + "ĠMain tenance", + "ĠHer b", + "Ġhar ms", + "p acked", + "ĠFe instein", + "Ġmarry ing", + "Ġbl ending", + "ĠR ates", + "Ġ18 80", + "Ġwr ink", + "ĠUn ch", + "ĠTor ch", + "desc ribed", + "Ġhuman oid", + "ilit ating", + "ĠCon v", + "ĠFe ld", + "IGH TS", + "Ġwhistlebl ower", + "ort mund", + "ets y", + "arre tt", + "ĠMon o", + "ĠI ke", + "ĠC NBC", + "ĠW AY", + "ĠMD MA", + "ĠIndividual s", + "Ġsupplement al", + "Ġpower house", + "ĠSt ru", + "F ocus", + "aph ael", + "ĠCol leg", + "att i", + "Z A", + "Ġp erenn", + "ĠSign ature", + "ĠRod ney", + "Ġcub es", + "idd led", + "ĠD ante", + "ĠIN V", + "iling ual", + "ĠC th", + "Ġso fa", + "Ġintimid ate", + "ĠR oe", + "ĠDi plom", + "ĠCount ries", + "ays on", + "Ġextrad ition", + "Ġdis abling", + "ĠCard iff", + "Ġmemor andum", + "ĠTr ace", + "Ġ?? ?", + "se ctor", + "ĠRou hani", + "ĠY ates", + "ĠFree ze", + "Ġbl adder", + "M otor", + "ĠProm ise", + "ant asy", + "Ġforesee able", + "ĠC ologne", + "cont ainer", + "ĠTre es", + "ĠG ors", + "ĠSin clair", + "Ġbar ring", + "key e", + "Ġsl ashed", + "ĠStat istical", + "é ĩ", + "Ġâĸ º", + "All ows", + "Ġhum ility", + "Ġdr illed", + "ĠF urn", + "44 3", + "Ġse wage", + "Ġhome page", + "Ġcour tyard", + "Ġv ile", + "Ġsubsid iaries", + "aj o", + "direct ory", + "Ġam mon", + "V ers", + "charg es", + "Ġ} }", + "ĠCh ains", + "Ġ24 6", + "n ob", + "Ġper cept", + "Ġg rit", + "Ġfisher men", + "ĠIraq is", + "ĠDIS TR", + "ĠF ULL", + "ĠEval uation", + "g raph", + "at ial", + "Ġcooper ating", + "Ġmel an", + "Ġenlight ened", + "Ġal i", + "t ailed", + "Ġsal ute", + "Ġweak est", + "ĠBull dogs", + "U A", + "ĠAll oy", + "Ġsem en", + "oc ene", + "ĠWilliam son", + "s pr", + ", âĢĶ", + "ĠG F", + "itt ens", + "Be at", + "ĠJ unk", + "iph ate", + "ĠFarm ers", + "ĠBit coins", + "ig ers", + "d h", + "ĠL oyal", + "p ayer", + "Ġentert ained", + "Ġpenn ed", + "Ġcoup on", + "Que ue", + "Ġweaken ing", + "c arry", + "Ġunderest imate", + "Ġshoot out", + "Ġcharism atic", + "ĠProced ure", + "Ġprud ent", + "in ances", + "Ġric hes", + "Ġcort ical", + "Ġstr ides", + "Ġd rib", + "ĠOil ers", + "5 40", + "ĠPer form", + "ĠBang kok", + "Ġe uth", + "S ER", + "Ġsimpl istic", + "t ops", + "camp aign", + "Q uality", + "Ġimpover ished", + "ĠEisen hower", + "Ġaug ment", + "ĠH arden", + "Ġinterven ed", + "Ġlist ens", + "ĠK ok", + "Ġs age", + "Ġrub bish", + "ĠD ed", + "Ġm ull", + "pe lling", + "Ġvide ot", + "Produ ction", + "D J", + "m iah", + "Ġadapt ations", + "Ġmed ically", + "Ġboard ed", + "Ġarrog ance", + "Ġscra pped", + "Ġopp ress", + "FORM ATION", + "Ġj unction", + "4 15", + "EE EE", + "S kill", + "Ġsub du", + "ĠSug gest", + "ĠP ett", + "Ġle tt", + "ĠMan ip", + "ĠC af", + "ĠCooper ation", + "T her", + "Ġreg ained", + "¶ æ", + "ref lect", + "Ġth ugs", + "ĠShel by", + "Ġdict ates", + "ĠWe iner", + "ĠH ale", + "Ġbatt leground", + "s child", + "Ġcond ol", + "h unt", + "osit ories", + "Ġacc uses", + "Fil ename", + "Ġsh ri", + "Ġmotiv ate", + "Ġreflect ions", + "N ull", + "ĠL obby", + "¥ µ", + "ĠS ATA", + "ĠBack up", + "Ñ ĥ", + "n in", + "ĠCor rection", + "Ġju icy", + "ut ra", + "ĠP ric", + "Ġrest raining", + "ĠAir bnb", + "ĠAr rest", + "Ġappropri ations", + "Ġsl opes", + 
"Ġmans laughter", + "Ġwork ings", + "ĠH uss", + "ĠF rey", + "Le ave", + "ĠHarm ony", + "ĠF eder", + "Ġ4 30", + "Ġt rench", + "Ġglad ly", + "Ġbull pen", + "ĠG au", + "b ones", + "Ġgro ove", + "Ġpre text", + "ã ħĭ", + "Ġtransm itter", + "ĠComp onent", + "Ġunder age", + "ĠEm pires", + "T ile", + "Ġo y", + "ĠMar vin", + "ĠC AS", + "Ġbl oss", + "Ġrepl icated", + "ĠMar iners", + "Marc us", + "ĠBl ocks", + "Ġliber ated", + "Ġbutter fly", + "Fe el", + "Ġfer mentation", + "Ġyou tube", + "Ġoff end", + "ĠTer m", + "res ist", + "Ġcess ation", + "Ġinsurg ency", + "Ġb ir", + "ĠRa ise", + "59 5", + "Ġhypothes es", + "50 2", + "Ġpl aque", + "ocr at", + "Ġjack ets", + "ĠHuff Post", + "am ong", + "Ġconf er", + "48 7", + "ĠL illy", + "Ġadapt ing", + "ĠF ay", + "Ġsh oved", + "ve c", + "Ġref ine", + "Ġg on", + "Ġgun men", + "z ai", + "ĠShut tle", + "ĠI zan", + "Ġ19 13", + "Ġple thora", + "· ·", + "Ġ5 10", + "Ġp uberty", + "Ġ24 1", + "ĠWe alth", + "ĠAl ma", + "ĠM EM", + "ĠAd ults", + "C as", + "pr ison", + "R ace", + "Ġwater proof", + "Ġathlet icism", + "Ġcapital ize", + "ĠJu ice", + "Ġillum inated", + "ĠP ascal", + "Ġirrit ation", + "ĠWitness es", + "ad le", + "ĠAst ro", + "Ġf ax", + "ĠEl vis", + "Prim ary", + "ĠL ich", + "ĠEl ves", + "Ġres iding", + "Ġst umble", + "3 19", + "ĠP KK", + "Ġadvers aries", + "D OS", + "ĠR itual", + "Ġsm ear", + "Ġar son", + "ident al", + "Ġsc ant", + "Ġmon archy", + "Ġhal ftime", + "Ġresid ue", + "Ġind ign", + "ĠSh aun", + "ĠEl m", + "aur i", + "A ff", + "W ATCH", + "ĠLy on", + "hel ps", + "36 1", + "Ġlobby ist", + "Ġdimin ishing", + "Ġout breaks", + "Ġgo ats", + "f avorite", + "ĠN ah", + "son ian", + "ĠBo oster", + "Ġsand box", + "ĠF are", + "ĠMalt a", + "Ġatt Rot", + "ĠM OR", + "ld e", + "Ġnavig ating", + "T ouch", + "Ġunt rue", + "ĠDis aster", + "Ġl udicrous", + "Pass word", + "ĠJ FK", + "blog spot", + "4 16", + "ĠUN DER", + "ern al", + "Ġdelay ing", + "T OP", + "Ġimpl ants", + "ĠAV G", + "ĠH uge", + "att r", + "Ġjournal istic", + "ĠPe yton", + "ĠI A", + "R ap", + "go al", + "ĠProgram me", + "Ġsm ashing", + "w ives", + "print ln", + "ĠPl ague", + "in us", + "EE P", + "Ġcru iser", + "ĠPar ish", + "umin ium", + "Ġoccup ants", + "ĠJ ihad", + "m op", + "Ġp int", + "Ġhe ct", + "ĠMe cca", + "direct or", + "ĠFund ing", + "ĠM ixed", + "Ġst ag", + "T ier", + "Ġg ust", + "Ġbright ly", + "ors i", + "Ġup hill", + "R D", + "Ġles ions", + "ĠBund y", + "liv ious", + "Ġbi ologist", + "ĠFac ulty", + "ĠAuthor ization", + "Ġ24 4", + "All ow", + "ï ¸", + "ĠGi ul", + "Ġpert inent", + "ot aur", + "es se", + "ĠRo of", + "Ġunman ned", + "35 1", + "ĠSh ak", + "ĠO rient", + "Ġend anger", + "D ir", + "Ġrepl en", + "ed ient", + "Ġtail or", + "Ġgad gets", + "Ġaud ible", + "âĺ Ĩ", + "N ice", + "Ġbomb ard", + "ĠR ape", + "Ġdef iance", + "ĠTW O", + "ĠFilip ino", + "Ġunaff ected", + "erv atives", + "Ġso ared", + "ĠBol ton", + "Ġcomprom ising", + "ĠBrew ers", + "R AL", + "ĠA HL", + "icy cle", + "Ġv ampires", + "Ġdi pped", + "oy er", + "ĠX III", + "Ġsidew ays", + "ĠW aste", + "ĠD iss", + "ĠâĶľ âĶĢâĶĢ", + "$ .", + "Ġhabit ats", + "ĠBe ef", + "tr uth", + "tr ained", + "spl it", + "R us", + "And y", + "ĠB ram", + "RE P", + "p id", + "è£ ħ", + "ĠMut ant", + "An im", + "ĠMar ina", + "Ġfut ile", + "hig hest", + "f requency", + "Ġepile psy", + "Ġcop ing", + "Ġconc ise", + "Ġtr acing", + "ĠS UN", + "pan el", + "ĠSoph ie", + "ĠCrow ley", + "ĠAd olf", + "ĠShoot er", + "Ġsh aky", + "ĠI G", + "ĠL ies", + "ĠBar ber", + "p kg", + "Ġupt ake", + "Ġpred atory", + "UL TS", + "/ **", + "Ġintox icated", + "ĠWest brook", + "od der", 
+ "he ment", + "Ġbas eman", + "AP D", + "st orage", + "ĠFif ty", + "ed itor", + "G EN", + "UT ION", + "ir ting", + "Ġse wing", + "r ift", + "Ġag ony", + "ĠS ands", + "Ġ25 4", + "C ash", + "Ġl odge", + "Ġp unt", + "N atural", + "ĠIde as", + "Ġerrone ous", + "ĠSens or", + "ĠHann ity", + "Ġ19 21", + "Ġm ould", + "ĠG on", + "kay a", + "Ġanonym ously", + "ĠK EY", + "Ġsim ulator", + "W inter", + "Ġstream ed", + "50 7", + "? \",", + "Ġte ased", + "Ġco efficient", + "Ġwart ime", + "ĠTH R", + "' '.", + "ĠBank ing", + "mp ire", + "Ġf andom", + "Ġl ia", + "G a", + "Ġdown hill", + "Ġinterpre ting", + "Ind ividual", + "N orm", + "Ġjealous y", + "bit coin", + "Ġple asures", + "ĠToy s", + "ĠChev rolet", + "ĠAd visor", + "IZ E", + "Ġrecept ions", + "70 6", + "C ro", + "Ġ26 2", + "Ġcit rus", + "ir u", + "Review er", + "ject ed", + "U ES", + "an z", + "19 81", + "ĠWork er", + "Ġcompl ied", + "ores cent", + "contin ental", + "T on", + "ĠPr ism", + "ĠShe ep", + "Ġ28 8", + "n ox", + "ĠV og", + "O rd", + "Ġreal ms", + "te k", + "Ġirrig ation", + "Ġbicy cles", + "Ġelectron ically", + "p oly", + "t all", + "() );", + "Ġaest hetics", + "ĠInteg rated", + "Expl ore", + "Ġd unk", + "47 6", + "p ain", + "ĠJac ques", + "ĠD mit", + "Fram es", + "Ġreun ited", + "Ġhum id", + "D ro", + "P olitical", + "Ġyouth ful", + "Ġent ails", + "Ġmosqu ito", + "36 3", + "spe cies", + "Ġcoord inating", + "ĠMay hem", + "ĠMagn us", + "M ount", + "Impro ved", + "ĠST ATE", + "ATT LE", + "Ġflow ed", + "Ġtack led", + "Ġfashion ed", + "Ġre organ", + "iv ari", + "f inger", + "Ġreluct antly", + "et ting", + "ĠV and", + "you ng", + "ĠGar land", + "Ġpresum ption", + "Ġamen ities", + "ĠPle asant", + "on ential", + "ĠO xy", + "Ġmor als", + "ĠY ah", + "Read y", + "Sim on", + "En h", + "D emon", + "Ġcl ich", + "Mon itor", + "ĠD U", + "Ġwel comes", + "Ġstand out", + "Ġdread ful", + "Ġban anas", + "Ġball oons", + "h ooting", + "bas ic", + "Ġsuff ix", + "Ġd uly", + "can o", + "Ch ain", + "at os", + "Ġgeop olitical", + "Ġ( &", + "ĠGem ini", + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ", + "Ġacqu itted", + "L uck", + "prot ect", + "10 24", + "Ġsc arcity", + "Ġmind fulness", + "ec ided", + "D N", + "pr ime", + "ĠPres idents", + "ĠVID EO", + "Ġ( âĪĴ", + "add ock", + "N OR", + "ĠP ru", + "p un", + "ĠL OL", + ")) ))", + "ĠL iqu", + "ĠS AS", + "Ġsty ling", + "Ġpunish ments", + "Ġnum b", + "Ġasc ertain", + "ĠRock ies", + "f lu", + "Th umbnail", + "Ġperpet rated", + "ĠSem i", + "Ġdis arm", + "ĠOld er", + "ĠEx ception", + "Ġexponent ially", + "ĠCommun ities", + "Ġabol ish", + "ĠPart ner", + "pt oms", + "Ġ7 77", + "ĠFo ley", + "ĠC ases", + "Ġgre ase", + "ĠReb irth", + "G round", + "Ġ; )", + "ĠDoct rine", + "ik ini", + "Y e", + "ĠBl ossom", + "Ġpers ists", + "b ill", + "Ġinf usion", + "Ġbud dies", + "9 11", + "ĠPat ient", + "Ġdem os", + "Ġacquaint ance", + "ĠP aw", + "at ari", + "Ġx ml", + "Ġfasc ination", + "ĠSer ve", + "Ï Ĥ", + "br anded", + "Ġa z", + "Return s", + "Ġover shadow", + "Ġro am", + "Ġspeed y", + "n umbered", + "hel ial", + "Ġdisc iple", + "Ġass urances", + "g iven", + "pect ing", + "ĠN atalie", + "çĶ °", + "Ġmosquit oes", + "rote in", + "Ġnumer ic", + "Ġindepend ents", + "Ġtrans itional", + "Ġreaction ary", + "ĠMech dragon", + "do ctor", + "Ġshort est", + "Ġsequ ential", + "ĠB ac", + "ĠAccount s", + "ãģ Į", + "ach y", + "ract ive", + "ĠReg iment", + "Ġbreat htaking", + "ffic iency", + "ĠB ates", + "Ġ3 11", + "Ġward robe", + "ft s", + "ĠBer k", + "Sim ply", + 
"ĠRivers ide", + "iver ing", + "ident ial", + "lu cent", + "Ġen riched", + "ĠCon ver", + "ĠG iving", + "ãĥ Ļ", + "Ġlegal ize", + "ĠF TC", + "Ġfre aking", + "M ix", + "Ġter restrial", + "es ian", + "ci ents", + "W ing", + "LO AD", + "Ġled ge", + "ĠViol ent", + "ĠMet all", + "Ġ30 8", + "Ġs outheastern", + "hett o", + "M eat", + "Ġslow down", + "Ġret reated", + "Jere my", + "end as", + "**** *", + "er ic", + "Ġre ins", + "opp able", + "ĠHuman ity", + "ear ances", + "rig an", + "C amera", + "Ġwa ivers", + "s oc", + "Ġalter ation", + "trans form", + "ĠC emetery", + "50 6", + "Ġindef inite", + "Ġstim ulating", + "y g", + "60 3", + "ĠS op", + "Ġdescript ive", + "Ph ase", + "ĠEd mund", + "Ġpneum onia", + "vent us", + "A mb", + "Ġlabor atories", + "ĠEx clusive", + "ug ar", + "W ere", + "Ġmalf unction", + "Ġhomosexual s", + "Ġ---- ---", + "un i", + "Ġturb ines", + "ĠEqu ity", + "D u", + "Ġmind ed", + "ĠR H", + "ĠBlack hawks", + "Ġfe ats", + "Ġ17 00", + "re pl", + "36 2", + "lad en", + "Ġindisp ensable", + "ly ss", + "tt i", + "Ġre el", + "Ġdiver ted", + "Ġlik eness", + "Ġsubscript ions", + "Ġfing ert", + "Ġfil thy", + "dest ruct", + "d raft", + "ĠBernard ino", + "l aunch", + "Ġper plex", + "ĠS UM", + "car b", + "Ġswe ater", + "ĠVent ure", + "ĠJ ag", + "ĠCele b", + "ĠV oters", + "Ġstead fast", + "Ġathlet ics", + "ĠHans on", + "ĠDr ac", + "Tr acker", + "Ġcomm end", + "ĠPres idency", + "ĠD ID", + "in formed", + "Ġweb page", + "P retty", + "Ġforce fully", + "ãĥĥ ãĤ¯", + "Ġrel ocation", + "Ġsat ire", + "â ī", + "ĠSunder land", + "æ Ħ", + "V oice", + "???? ????", + "Ġinform ant", + "Ġbow el", + "ĠUn iform", + "Ġ ...\"", + "Ġpur ge", + "Ġpic nic", + "ĠU mb", + "ĠU PDATE", + "ĠSapp hire", + "ĠSt all", + "le arn", + "Ġobject ively", + "Ġob liter", + "Ġlooph ole", + "Ġjour neys", + "Ġo mission", + "Pro s", + "ĠSid ney", + "pl oma", + "Ġspray ed", + "Ġg uru", + "Ġtra itor", + "Ġtim et", + "Ġsn apping", + "ĠSe vent", + "urn al", + "ĠUk ip", + "Ġb owed", + "por al", + "l iberal", + "R os", + "Quest ions", + "i OS", + "Ġsummar ize", + "ST AT", + "Ġ18 50", + "ap est", + "Ġl ender", + "ĠVari able", + "br inging", + "ĠL ORD", + ", )", + "Ġcollaps es", + "x iety", + "ĠN ed", + "Y D", + "ĠSch a", + "Ġantib ody", + "Ġdis band", + "y re", + "ill usion", + "Ġro ver", + "s hed", + "ĠHiro sh", + "cc i", + "Ġcal am", + "ĠMort on", + "P interest", + "Ġ19 28", + "ĠE uras", + "ord es", + "Ġf ences", + "ĠIn ventory", + "ĠVal encia", + "ĠU d", + "ĠT iff", + "Ġsqu e", + "Ġqu otation", + "Ġtroubles ome", + "er ker", + "QU EST", + "ĠKing doms", + "s outh", + "Ġle vy", + "Pr ince", + "ĠSt ing", + "Ġnick named", + "Ġapp e", + "Ġphot ographic", + "Ġcorp us", + "re ference", + "ĠT rog", + "U nt", + ") =(", + "ĠLat via", + "Ġactiv ating", + "Ġlicense e", + "Ġdispar ities", + "ĠNews letter", + "ãĥĥ ãĥĪ", + "Ġfree ing", + "ĠJe ep", + "ĠPer ception", + "ins k", + "Ġsil icone", + "ĠHay den", + "Le an", + "ĠSuz uki", + "ibr arian", + "66 8", + "Ġsp or", + "Ġcorrel ations", + "ag hetti", + "Ġtu ber", + "ĠIP CC", + "il us", + "ĠV u", + "Ġwealth iest", + "ĠCarb uncle", + "an za", + "Ġfool ed", + "ĠZ ur", + "Ġd addy", + "ran o", + "il ian", + "Ġknock out", + "f man", + "requ ired", + "ĠWik ileaks", + "ĠD uffy", + "ON T", + "Ġins ol", + "ĠObject s", + "Ġb ou", + "ĠNord ic", + "ĠIns ert", + "sc an", + "Ġd ancers", + "Ġid iots", + "major ity", + "ĠNev ille", + "ĠFree BSD", + "Ġt art", + "pan ic", + "69 0", + "Ġcoc oa", + "Ġsam pled", + "Ġlook up", + "Ind ust", + "Ġinject ions", + "gen re", + "Ġa u", + "Ġroad way", + "Ġgen itals", + "K ind", + "ĠEx 
aminer", + "ĠY az", + "F resh", + "Ġpar alysis", + "ĠAl uminum", + "Ġre ap", + "ok é", + "Ġsl oppy", + "ĠTun nel", + "pos ium", + "ner y", + "en ic", + "Ġher bal", + "ĠOut er", + "ĠBuild er", + "Ġinc ur", + "Ġide ologies", + "Ġback ups", + "cons uming", + "ĠDet ect", + "de ck", + "ĠKN OW", + "ĠG ret", + "ĠM IC", + "Ġtough ness", + "ĠEx hibit", + "Ġh ive", + "L es", + "ĠSCH OOL", + "ĠAt ari", + "ald e", + "ĠN ull", + "and estine", + "m ouse", + "Ġbrig ade", + "48 9", + "Ġrev ol", + "ĠLaw son", + "ĠW ah", + "op oly", + "eb ted", + "ĠS aunders", + "Ġ3 13", + "ĠW inc", + "Ġtab oo", + "ĠHel met", + "Ġw edge", + "ch ip", + "ĠT ina", + "b g", + "Ġinf uri", + "r n", + "Ġanomal ies", + "ĠSy nc", + "ĠEx am", + "ĠComm it", + "ĠDi ary", + "ĠALS O", + "ĠDe bor", + "omed ical", + "Ġcomprehens ion", + "6 55", + "Ġempower ing", + "Ġ ire", + "Ġju ices", + "ĠE TH", + "ĠBox ing", + "=\" /", + "Ġfacilit ated", + "p oke", + "ĠPars ons", + "ĠMod er", + "tra vel", + "Ġcivil izations", + "Ġliber tarians", + "Ġrun e", + "ĠCl arks", + "at hed", + "Ġcampaign ers", + "ĠDis patch", + "ĠFah renheit", + "ĠCap com", + "-------- --", + "Ġl ace", + "Ġdr aining", + "Ġl iner", + "ĠArt ificial", + "é n", + "t ask", + "] ).", + "ĠGM O", + "ĠOper ator", + "ord inary", + "ĠInf luence", + "ĠU ps", + "Ġpot ency", + "uss en", + "osp ons", + "ĠSw im", + "ĠDead line", + "Un ity", + "Ġcul inary", + "Ġenlight enment", + "Ġwe arer", + "Ġmin ed", + "Ġp ly", + "Ġinc est", + "ĠDVD s", + "W alk", + "B TC", + "Tr ade", + "Ġdev al", + "ib and", + "ĠOvers ight", + "Palest inian", + "Ġd art", + "Ġm ul", + "L R", + "Ġrem ovable", + "ĠReal ms", + "ì Ŀ", + "Ġmisc ar", + "ĠV ulkan", + "68 5", + "è re", + "ĠS ap", + "Ġmer ging", + "ĠCar ly", + "che ster", + "Ġbr isk", + "Ġlux urious", + "ĠGener ator", + "Ġbit terness", + "Ġed ible", + "Ġ24 3", + "T G", + "Ġrect angle", + "With No", + "bel ow", + "J enn", + "Ġdark est", + "Ġh itch", + "Ġdos age", + "Ġsc aven", + "ĠK eller", + "ĠIllust rated", + "Certain ly", + "ĠMaver icks", + "Marg inal", + "Ġdiarr hea", + "Ġenorm ously", + "Ġ9 99", + "sh r", + "qu art", + "Ġadam ant", + "ĠM ew", + "Ġren ovation", + "Ġcerv ical", + "ĠPercent age", + "en ers", + "ĠKim ber", + "Ġflo ats", + "Ġde x", + "ĠW itcher", + "ĠSwan sea", + "d m", + "Ġsal ty", + "y ellow", + "Ġca pe", + "ĠDr ain", + "ĠPaul a", + "ĠTol edo", + "les i", + "Mag azine", + "ĠW ick", + "ĠM n", + "ĠA ck", + "ĠR iding", + "AS ON", + "Ġhom ophobic", + "AR P", + "Ġwand ered", + "C PU", + "ood oo", + "ĠP ipe", + "Ġtight ening", + "ĠBut t", + "3 18", + "Ġdesert ed", + "S ession", + "Ġfacilit ating", + "J ump", + "Ġemer gencies", + "OW ER", + "Ġexhaust ive", + "ĠAF TER", + "Ġheart beat", + "ĠLab el", + "ack y", + "ĠCert ified", + "ilt ration", + "Z e", + "ĠU tt", + "Ġ13 00", + "Ġpres ume", + "ĠDis p", + "Ġsur ged", + "Ġdoll s", + "Col umb", + "Ġchim pan", + "ĠR azor", + "Ġt icks", + "Ġcouncill or", + "Ġpilgr image", + "ĠReb els", + "ĠQ C", + "ĠA uction", + "x ia", + "ik k", + "b red", + "Ġinsert ion", + "Ġco arse", + "d B", + "SE E", + "ĠZ ap", + "ĠF oo", + "Ġcontem por", + "ĠQuarter ly", + "ot ions", + "ĠAl chemist", + "ĠT rey", + "ĠDu o", + "S weet", + "80 4", + "ĠGi ov", + "Ġfun n", + "N in", + "h off", + "Ġram ifications", + "Ġ19 22", + "ĠExper ts", + "az es", + "Ġgar ments", + "ar ial", + "ĠN ab", + "Ġ25 7", + "ĠV ed", + "Ġhum orous", + "ĠPom pe", + "Ġn ylon", + "Ġlur king", + "ĠSerge y", + "ĠMatt is", + "Ġmisogyn y", + "ĠComp onents", + "ĠWatch ing", + "ĠF olk", + "ract ical", + "B ush", + "Ġt aped", + "Ġgroup ing", + "Ġbe ads", + "Ġ20 48", + "Ġcon 
du", + "quer que", + "Read ing", + "Ġgriev ances", + "Ult ra", + "Ġend point", + "H ig", + "ĠSt atic", + "ĠScar borough", + "L ua", + "ĠMess i", + "a qu", + "ĠPsy Net", + "ĠR udd", + "Ġa venue", + "v p", + "J er", + "Ġsh ady", + "ĠRes ist", + "ĠArt emis", + "Ġcare less", + "Ġbro kers", + "Ġtemper ament", + "Ġ5 20", + "T ags", + "ĠTurn ing", + "Ġut tered", + "Ġp edd", + "Ġimpro vised", + "Ġ: (", + "Ġtab l", + "Ġpl ains", + "16 00", + "press ure", + "ĠEss ence", + "marg in", + "friend s", + "ĠRest oration", + "Ġpoll ut", + "ĠPok er", + "ĠAugust ine", + "ĠC IS", + "ĠSE AL", + "or ama", + "Ġth wart", + "se ek", + "Ġp agan", + " º", + "cp u", + "Ġg arn", + "Ġass ortment", + "ĠI LCS", + "t ower", + "Recomm ended", + "Ġun born", + "ĠRandom Redditor", + "ĠRandomRedditor WithNo", + "Ġparaly zed", + "Ġeru ption", + "Ġinter sect", + "ĠSt oke", + "ĠS co", + "B ind", + "å ¾", + "ĠP NG", + "ĠNeg ative", + "ĠNO AA", + "Le on", + "Ġall oy", + "ĠL ama", + "ĠD iversity", + "5 75", + "Ġunderest imated", + "ĠSc or", + "Ġm ural", + "Ġb usted", + "so on", + "l if", + "Ġnone x", + "Ġall ergy", + "ĠUnder world", + "ĠR ays", + "ĠBl asio", + "Ġh rs", + "ĠD ir", + "Ġ3 27", + "by ter", + "Ġrepl acements", + "Ġactiv ates", + "ri ved", + "M H", + "Ġp ans", + "ĠH I", + "Ġlong itudinal", + "Ġnu isance", + "al er", + "Ġsw ell", + "ĠS igned", + "s ci", + "ĠIs les", + "ĠA GA", + "Ġdef iant", + "Ġson ic", + "oc on", + "K C", + "ĠA im", + "t ie", + "ah ah", + "Ġm L", + "D X", + "Ġb isc", + "ĠBill board", + "ĠSY STEM", + "NE Y", + "ga ard", + "Ġdist ressed", + "former ly", + "Al an", + "Ġche fs", + "Ġopt ics", + "ĠC omet", + "ĠAM C", + "Ġredes igned", + "irm ation", + "Ġsight ings", + "38 2", + "3 11", + "ĠW B", + "Ġcont raction", + "ĠT OTAL", + "D ual", + "Ġstart led", + "Ġunderstand ably", + "Ġsung lasses", + "ETH OD", + "Ġd ocker", + "Ġsurf ing", + "ĠH EL", + "ĠSl ack", + "ton es", + "Ġsh alt", + "Vis ual", + "49 8", + "Dep artment", + "c ussion", + "Ġunrest ricted", + "Ġt ad", + "Ġre name", + "employ ed", + "Ġeduc ating", + "Ġgrin ned", + "bed room", + "ĠActiv ities", + "ĠV elvet", + "ĠSW AT", + "Ġsh uffle", + "ig or", + "Ġsatur ation", + "F inding", + "c ream", + "ic ter", + "Ġv odka", + "tr acking", + "te c", + "Ġfore ground", + "iest a", + "Ġve hement", + "ĠEC B", + "ĠT ie", + "E y", + "Ġt urtles", + "ĠRail road", + "ĠKat z", + "ĠFram es", + "Ġmen ace", + "ĠFell owship", + "ĠEss ential", + "ugg ish", + "Ġdri p", + "ch witz", + "ĠKy oto", + "s b", + "ĠN ina", + "Param eter", + "Ġal arms", + "ĠCl aud", + "Ġpione ering", + "Ġchief ly", + "ĠSc ream", + "Col lection", + "Ġthank fully", + "ĠRonald o", + "åŃ IJ", + "st rip", + "ĠDisney land", + "com mercial", + "See ing", + "S oul", + "Ġevac uate", + "Ġc iv", + "ĠAs he", + "Ġdiv ides", + "ĠD agger", + "rehens ive", + "Ġber ries", + "ĠD F", + "Ġs ushi", + "Ġplur ality", + "W I", + "Ġdisadvant aged", + "Ġbatt alion", + "ob iles", + "45 1", + "Ġcl ing", + "Ġunden iable", + "ĠL ounge", + "Ġha unt", + "p he", + "Ġquant ify", + "Ġdiff ered", + "Ġ[* ]", + "ĠV iz", + "c um", + "sl ave", + "Ġvide og", + "Ġqu ar", + "Ġbund les", + "ĠAl onso", + "t ackle", + "Ġneur onal", + "Ġlandsl ide", + "conf irmed", + "ĠDep th", + "Ġrenew ables", + "B ear", + "ĠMaced onia", + "Ġjer seys", + "Ġb unk", + "ĠSp awn", + "ĠControl s", + "ĠBuch anan", + "Ġrobot ics", + "Ġemphas izing", + "ĠTut orial", + "h yp", + "ist on", + "Ġmonument al", + "æ °", + "ĠCar ry", + "Ġt bsp", + "en ance", + "H ill", + "art hed", + "Ġro tten", + "De an", + "Ġtw isting", + "Ġgood will", + "Ġimm ersion", + "L iving", + "Ġbr 
ushes", + "ĠC GI", + "ĠAt k", + "tr aditional", + "Ġph antom", + "ĠSt amina", + "Ġexpans ions", + "ĠMar in", + "Ġembark ed", + "ĠE g", + "int estinal", + "ĠPE OPLE", + "ĠBo oth", + "ĠApp alach", + "Ġreleg ated", + "V T", + "M IT", + "Ġmust er", + "Ġwithdraw ing", + "Ġmicrosc ope", + "ĠG athering", + "ĠC rescent", + "ĠArgent ine", + "ĠDec re", + "ĠDomin ic", + "Ġbud s", + "ant age", + "ĠI on", + "Ġwid ened", + "ONS ORED", + "ĠGl oves", + "iann opoulos", + "raz en", + "fe el", + "Ġrepay ment", + "Ġhind sight", + "ĠRE ALLY", + "ĠPist ol", + "ĠBra h", + "Ġwat ts", + "Ġsurv ives", + "Ġfl urry", + "iss y", + "Al ert", + "ĠUrug uay", + "Ph oenix", + "S low", + "ĠG rave", + "ĠF ir", + "Ġmanage able", + "Ġtar iff", + "ĠU DP", + "ĠPist ons", + "ĠNiger ian", + "Ġstrike outs", + "Ġcos metics", + "whel ming", + "f ab", + "c ape", + "pro xy", + "Ġre think", + "Ġover coming", + "sim ple", + "Ġw oo", + "Ġdistract ing", + "ĠSt anton", + "ĠTuls a", + "ĠD ock", + "65 9", + "Ġdisc ord", + "ĠEm acs", + "ĠV es", + "ĠR OB", + "Ġreass uring", + "Ġcons ortium", + "Muslim s", + "3 21", + "Ġprompt s", + "se i", + "ĠH itch", + "imp osed", + "ĠF ool", + "Ġindisc rim", + "wr ong", + "bu querque", + "D avis", + "! ]", + "Ġtim eless", + "ĠNE ED", + "Ġpestic ide", + "Ġrally ing", + "ĠCal der", + "Ġå ¤", + "Ġx p", + "ĠUn le", + "ĠEx port", + "lu aj", + "B uff", + ") [", + "Ġsq or", + "S audi", + "Ġis tg", + "Ġindul ge", + "pro c", + "Ġdisg usted", + "Ġcomp ounded", + "Ġn em", + "Ġschool ing", + "ĠC ure", + "process ing", + "S ol", + "Ġpro verb", + "it ized", + "ĠAlv arez", + "Ġscar f", + "Ġrect angular", + "re ve", + "Ġh ormonal", + "ĠSt ress", + "itiz en", + "Ġ4 25", + "girl s", + "ĠNo ir", + "ĠR app", + "Ġmar ches", + "ch urch", + "ĠUs es", + "Ġ40 5", + "ĠBer m", + "Ġord inances", + "ĠJud gment", + "Charg es", + "ĠZ in", + "Ġdust y", + "Ġstraw berries", + "Ġper ce", + "ĠTh ur", + "ĠDebor ah", + "net flix", + "ĠLam bert", + "Ġam used", + "ĠGu ang", + "Y OU", + "R GB", + "ĠC CTV", + "Ġf iat", + "r ang", + "Ġf ederation", + "ĠM ant", + "ĠB ust", + "ĠM are", + "respect ive", + "ĠM igration", + "ĠB IT", + "59 0", + "Ġpatriot ism", + "Ġout lining", + "reg ion", + "ĠJos é", + "Ġbl asting", + "ĠEz ra", + "B s", + "Ġundermin es", + "ĠSm ooth", + "Ġcl ashed", + "rad io", + "Ġtransition ing", + "ĠBucc aneers", + "ĠOw l", + "Ġplug s", + "Ġh iatus", + "ĠPin ball", + "Ġm ig", + "ĠNut r", + "ĠWolf e", + "Ġinteg ers", + "Ġor bits", + "ĠEd win", + "ĠDirect X", + "b ite", + "Ġbl azing", + "v r", + "Ed ge", + "ĠP ID", + "ex it", + "ĠCom ed", + "ĠPath finder", + "ĠGu id", + "ĠSign s", + "ĠZ er", + "ĠAg enda", + "Ġreimburse ment", + "M esh", + "i Phone", + "ĠMar cos", + "ĠS ites", + "h ate", + "en burg", + "Ġs ockets", + "p end", + "Bat man", + "v ir", + "ĠSH OW", + "Ġprovision al", + "con n", + "ĠDeath s", + "AT IVE", + "Pro file", + "sy m", + "J A", + "Ġnin ja", + "inst alled", + "id ates", + "eb ra", + "ĠOm aha", + "Ġse izing", + "ĠBe asts", + "Ġsal ts", + "M ission", + "Gener ally", + "ĠTr ilogy", + "he on", + "leg ates", + "Ġd ime", + "Ġf aire", + "par able", + "G raph", + "Ġtotal ing", + "Ġdiagram s", + "ĠYan uk", + "ple t", + "ĠMe h", + "Ġmyth ical", + "ĠStep hens", + "aut ical", + "ochem istry", + "Ġkil ograms", + "Ġel bows", + "anc ock", + "ĠB CE", + "ĠPr ague", + "Ġimpro v", + "ĠDev in", + "Ġ\" \\", + "par alle", + "Ġsuprem acists", + "ĠB illion", + "Ġreg imen", + "inn acle", + "Ġrequ isite", + "ang an", + "ĠBur lington", + "ain ment", + "ĠObject ive", + "oms ky", + "G V", + "Ġun ilateral", + "Ġt c", + "Ġh ires", + "ment al", + 
"Ġinvol untary", + "Ġtrans pl", + "ĠASC II", + " ¨", + "Ev ents", + "Ġdoub ted", + "ĠKa plan", + "ĠCour age", + "ig on", + "ĠMan aging", + "ĠT art", + "Ġfalse hood", + "ĠV iolet", + "Ġair s", + "Ġfertil izer", + "Brit ain", + "Ġaqu atic", + "ou f", + "W ords", + "ĠHart ford", + "Ġeven ings", + "ĠV engeance", + "qu ite", + "G all", + "ĠP ret", + "Ġp df", + "ĠL M", + "ĠSo chi", + "ĠInter cept", + "9 20", + "Ġprofit ability", + "ĠId le", + "ĠMac Donald", + "ĠEst ablishment", + "um sy", + "Ġgather ings", + "ĠN aj", + "Charl ie", + "Ġas cent", + "ĠProt ector", + "Ġal gebra", + "Ġbi os", + "for ums", + "EL S", + "Introdu ced", + "Ġ3 35", + "Ġastron omy", + "Cont ribut", + "ĠPol ic", + "Pl atform", + "Ġcontain ment", + "w rap", + "Ġcoron ary", + "ĠJ elly", + "man ager", + "Ġheart breaking", + "c air", + "ĠChe ro", + "c gi", + "Med ical", + "ĠAccount ability", + "! !\"", + "oph ile", + "Ġpsych otic", + "ĠRest rict", + "Ġequ itable", + "iss ues", + "Ġ19 05", + "ĠN ek", + "c ised", + "ĠTr acking", + "Ġo zone", + "Ġcook er", + "ros is", + "Ġre open", + "Ġinf inity", + "ĠPharm aceutical", + "ens ional", + "Att empt", + "ĠR ory", + "Mar co", + "Ġawa its", + "H OW", + "t reated", + "Ġbol st", + "Ġreve red", + "Ġp ods", + "opp ers", + "00 10", + "Ġampl itude", + "ric an", + "SP ONSORED", + "Ġtrou sers", + "Ġhal ves", + "ĠK aine", + "ĠCut ler", + "ĠA UTH", + "Ġsplend id", + "Ġprevent ive", + "ĠDud ley", + "if acts", + "umin ati", + "ĠY in", + "Ġad mon", + "ĠV ag", + "Ġin verted", + "Ġhast ily", + "ĠH ague", + "L yn", + "Ġled ger", + "Ġastron omical", + "get ting", + "Ġcirc a", + "ĠC ic", + "ĠTenn is", + "Lim ited", + "Ġd ru", + "ĠBY U", + "Ġtrave llers", + "Ġp ane", + "ĠInt ro", + "Ġpatient ly", + "Ġa iding", + "Ġlo os", + "ĠT ough", + "Ġ29 3", + "Ġconsum es", + "Source File", + "Ġ\"\" \"", + "Ġbond ing", + "Ġtil ted", + "Ġmenstru al", + "ĠCel estial", + "UL AR", + "Plug in", + "Ġrisk ing", + "N az", + "ĠRiy adh", + "Ġacc redited", + "Ġsk irm", + "é Ľ", + "Ġexam iner", + "Ġmess ing", + "Ġnear ing", + "ĠC hern", + "ĠBeck ham", + "Ġsw apped", + "Ġgo ose", + "K ay", + "Ġlo fty", + "ĠWal let", + "Ġ[ '", + "Ġap ocalypse", + "Ġb amboo", + "ĠSP ACE", + "ĠEl ena", + "Ġ30 6", + "ac ons", + "Ġtight ened", + "Ġadolesc ence", + "Ġrain y", + "Ġvandal ism", + "ĠNew town", + "Ġcon ject", + "c akes", + "Ġche ated", + "Ġmoder ators", + "par ams", + "E FF", + "Ġdece it", + "ĠST L", + "ĠTanz ania", + "ĠR I", + "Ġ19 23", + "ĠEx ile", + "the l", + "Ġthe olog", + "Ġquir ky", + "ĠIr vine", + "Ġneed y", + "or is", + "U m", + "K a", + "Ġmail box", + "3 22", + "Ġb os", + "ĠPet ra", + "K ING", + "Ġenlarg ed", + "O ften", + "Ġbad ass", + "Ġ3 43", + "ĠPl aces", + "ĠC AD", + "Ġpr istine", + "Ġinterven ing", + "d irection", + "Ġl az", + "ĠD SM", + "Ġproject ing", + "ĠF unk", + "ag og", + "pay ment", + "n ov", + "Ġch atter", + "AR B", + "Ġexam inations", + "ĠHouse hold", + "ĠG us", + "F ord", + "4 14", + "B oss", + "Ġmy stic", + "Ġle aps", + "ĠB av", + "ul z", + "b udget", + "Foot ball", + "Ġsubsid ized", + "Ġfirst hand", + "Ġcoinc ide", + "oc ular", + "Con n", + "ĠColl abor", + "Ġfool s", + "am ura", + "ah ar", + "r ists", + "Ġsw ollen", + "Ġexp ended", + "ĠP au", + "s up", + "Ġsp ar", + "Ġkey note", + "s uff", + "Ġunequ al", + "Ġprogress ing", + "str ings", + "ĠGamer gate", + "Dis ney", + "ĠEle ven", + "om nia", + "Ġscript ed", + "Ġear ners", + "bro ther", + "ĠEn abled", + "æ ³", + "Ġlar vae", + "ĠL OC", + "m ess", + "Wil son", + "ĠTem plate", + "success fully", + "Ġparam ount", + "Ġcamoufl age", + "Ġbind s", + "ĠQu iet", + "ĠSh 
utterstock", + "r ush", + "Ġmasc ot", + "fort une", + "ĠCol t", + "ĠBe yon", + "hab i", + "Ġha irc", + "Ġ26 7", + "ĠDe us", + "Ġtw itch", + "Ġconcent rating", + "Ġn ipples", + "c ible", + "Ġg ir", + "N Z", + "M ath", + "n ih", + "Requ ired", + "Ġp onder", + "ĠS AN", + "Ġwedd ings", + "Ġl oneliness", + "N ES", + "ĠMah jong", + "69 5", + "add le", + "ĠGar ner", + "ĠC OUR", + "Br idge", + "Ġsp ree", + "ĠCald well", + "Ġbri bery", + "Ġ���� ����", + "plug ins", + "Ġr acket", + "Ġchamp agne", + "vers ible", + "V ote", + "Ġmod ifiers", + "May or", + "6 80", + "Ġassemb lies", + "ĠS ultan", + "ĠN ing", + "ĠLad ies", + "Ġsulf ur", + "Ġor bs", + "Ġ---- -", + "____ ___", + "ĠJournal ism", + "Ġes ports", + "Ġl ush", + "Ġh ue", + "Ġspect ral", + "H onest", + "ãĥ ı", + "Ġbus hes", + "Ġrein forcement", + "Ġre opened", + "ĠWhe els", + "ĠM org", + "rie ving", + "Ġaux iliary", + "Ġj Query", + "ĠB AT", + "tes que", + "Ġver tex", + "p ure", + "f rey", + "ãĤ º", + "d os", + "Ġty ph", + "Ġc ull", + "Ġe q", + "Ġdec on", + "Ġtoss ing", + "Ġdispar ate", + "ĠBr igham", + "print f", + "led ged", + "Ġsu nd", + "Ġco zy", + "Ġhepat itis", + "per forming", + "Ġav al", + "ĠG G", + "f uture", + "Ġpet ertodd", + "ĠKos ovo", + "Ġmagn ets", + "Al ready", + "ĠEd ison", + "ĠCe res", + "ĠRA ID", + "Ġbrill iance", + "57 6", + "Ġder ives", + "Ġhypert ension", + "ĠÎ Ķ", + "Ġlamb da", + "Ġfl air", + "Ġmission aries", + "Ġrap es", + "ĠSt arter", + "ĠMon ths", + "Ġdef y", + "Ġseism ic", + "ĠR aphael", + "Ġeuro zone", + "65 6", + "z sche", + "Ġscr atched", + "Ġb ows", + "ĠLenn on", + "ĠGa ia", + "Ġdri pping", + "f acts", + "A le", + "Ġfrog s", + "ĠBre ast", + "ogene ity", + "ĠProsecut or", + "Ġampl ified", + "ĠHod g", + "ĠF n", + "Th ousands", + "ĠNI H", + "ĠMonitor ing", + "FT WARE", + "ĠPri ebus", + "ĠG rowing", + "hun ter", + "Ġdiagn ose", + "ĠM ald", + "ĠL R", + "Ġcrown ed", + "Ġburst ing", + "Ġdiss olution", + "j avascript", + "Ġuseful ness", + "ĠExec ution", + ": (", + "ĠIv ory", + "a ah", + "Ġpersecut ed", + "viol ence", + "ist as", + "ĠCr ate", + "Ġimpuls es", + "ĠSp ani", + "ed es", + "Hand le", + "ĠZ erg", + "think able", + "Last ly", + "Ġspont aneously", + "Ġinconven ient", + "Ġdismiss ing", + "Ġpl otted", + "Ġeight y", + "Ġ7 37", + "r ish", + "ĠThor nton", + "ath am", + "Ġsit com", + "V en", + "Rec ipe", + "t el", + "l und", + "Ġcle ars", + "ĠSas uke", + "Ġ25 8", + "Ġopt ing", + "Ġen raged", + "est hetic", + "ĠA e", + "uch s", + "Pre p", + "Fl ow", + "Ġrun off", + "ĠE ating", + "ĠG iles", + "ĠAct ing", + "res ources", + "ib aba", + "Ġr pm", + "Ġske wed", + "ĠBl anc", + "ĠS akuya", + "Ġhot ter", + "Ġ19 24", + "op ian", + "ck o", + "Ġcr umbling", + "Ġcapt ains", + "ĠAppropri ations", + "le aders", + "dro pping", + "an uts", + "Ġrevers ing", + "ĠP ose", + "ĠS ek", + "Sc ot", + "ĠIde a", + "c ise", + "ĠSloven ia", + "Ġ3 17", + "Do ctor", + "Ġcro cod", + "ald i", + "Se a", + "ĠFar rell", + "Ġmerc enaries", + "ĠR NC", + "ĠGu ess", + "Ġp acing", + "M achine", + "Streamer Bot", + "ĠChar ity", + "Ġ29 8", + "Ġcann ons", + "ĠTob y", + "TPP StreamerBot", + "ĠPass ion", + "cf g", + "Th om", + "Ġbad ges", + "ĠBern stein", + ". 
âĢĵ", + "ĠP OP", + "ĠCon j", + "Ġinitial ization", + "Ġbiod iversity", + "D ub", + "Ġfeud al", + "Ġdisclaim er", + "Ġc row", + "Ġign ition", + "ar f", + "S HA", + "Ġk Hz", + "h azard", + "ĠArt ists", + "oe uv", + "67 9", + "ĠRud y", + "N ine", + "ĠRam adan", + "å ½", + "itt o", + "Ġadren aline", + "C ert", + "Ġsmell ed", + "Ġimp unity", + "Ġag endas", + "ĠRe born", + "ĠCon cent", + "ĠSe ems", + "Ġo mega", + "ĠDust in", + "Ġback er", + "ĠSau ce", + "ĠBoy le", + "W IN", + "Ġsp ins", + "Ġpa uses", + "u pt", + "Ġshred ded", + "Ġstra pped", + "ĠCor ruption", + "Ġscr atches", + "Ġn i", + "Ġatt ire", + "ĠS AF", + "Factory Reloaded", + "ĠI PS", + "Ġ( %", + "Ġsem inar", + "f ocus", + "c ivil", + "Ġ18 60", + "int osh", + "Ġcontin ual", + "Ġabbre vi", + "ĠS ok", + "oc obo", + "X M", + "Ġfr antic", + "Ġunavoid able", + "Ġar tery", + "Ġannot ations", + "b ath", + "Cl imate", + "Ġd ors", + "ĠSl ide", + "co ord", + "ĠRel oad", + "ĠL DL", + "ĠLove craft", + "Ġunim agin", + "Ġresemb led", + "Ġbarr acks", + "n p", + "Ġsurrog ate", + "Ġcategor ized", + "ãĤ ©", + "Ġvacc inated", + "Ġdrain age", + "Ġind ist", + "ĠWhats App", + "Ġ18 70", + "oler ance", + "inv oke", + "am orph", + "Ġrecon nect", + "Ġem anc", + "Ġblind ness", + "Ġ12 80", + "intern et", + "c ollar", + "Ġalt ru", + "Ġab yss", + "ĠT RI", + "65 7", + "Ġinf used", + "HE AD", + "Ġforest ry", + "ĠWood y", + "ĠC i", + "w i", + "s am", + "78 4", + "hol iday", + "Ġmog ul", + "ĠF ees", + "ĠD EN", + "In ternal", + "ur bed", + "f usc", + "at om", + "ĠIll usion", + "Ġpoll ed", + "Ġfl ap", + "Ġco ax", + "L GBT", + "An aly", + "ĠSect ions", + "ĠCalif orn", + "em n", + "Ġh ither", + "ĠN IGHT", + "Ġn ailed", + "ĠPip eline", + "39 1", + "o of", + "ĠPr imal", + "vere nd", + "Ġsl ashing", + "Ġret ri", + "avi our", + "Ġdepart ing", + "g il", + "IS C", + "Ġmid way", + "Ġultras ound", + "Ġbeh aving", + "ĠT ara", + "class es", + "V irtual", + "ĠColon ial", + "Ġstri pping", + "Ġorchestr ated", + "ĠGra ves", + "45 2", + "ĠIron ically", + "ĠWrit ers", + "Ġl ends", + "ĠMan z", + "Ġra ven", + "Ġoxid ative", + "Ġ26 6", + "EL F", + "act ually", + "asc ar", + "D raft", + "Ġfavour able", + "Ġhumili ating", + "Ġf idelity", + "ĠH of", + "ĠX uan", + "49 6", + "Ġlay ered", + "at is", + "79 0", + "Ġpay check", + "it on", + "K ar", + "ĠVM ware", + "ĠFar mer", + "Ġserv ic", + "gl omer", + "Ġsl ump", + "ĠFab ric", + "ĠD OC", + "est ing", + "Ġreass ure", + "Ġph yl", + "v olt", + "it ory", + "R ules", + "Ġoxid ation", + "Ġpri zed", + "Ġmist ress", + "ĠDj ango", + "WAR N", + "å ij", + "Ġenc ode", + "ĠFeed back", + "Ġstupid ity", + "I an", + "ĠYugoslav ia", + "× ¨", + "ac l", + "UT E", + "19 77", + "Ġqual ifies", + "Ġpuls es", + "pret ty", + "Ġfro ze", + "Ġs s", + "Iter ator", + "Ġur gently", + "Ġm ailed", + "ĠCh am", + "Ġsust aining", + "Ġbas il", + "Ġpupp ies", + "il ant", + "ĠP LEASE", + "l ap", + "ace ous", + "F ear", + "ĠMaster y", + "aut omatic", + "ĠT AG", + "Ġant im", + "ag les", + "47 3", + "fram es", + "Ġwh ispers", + "ĠWho ever", + "Ġbra very", + "ĠUK IP", + "ract ions", + "\"\" \"", + "Ġt ame", + "Ġpart ed", + "every thing", + "CON T", + "Ġind ebted", + "Ġadd r", + "re k", + "IR ED", + "Ġem inent", + "cl inton", + "Ġo usted", + "Ġreview er", + "Ġmelt down", + "Ġre arr", + "ĠY ao", + "the real", + "aby te", + "Ġst umbling", + "Ġbat ches", + "Ġ25 9", + "Ġcontrace ptive", + "Ġprost itute", + "ens is", + "De cl", + "ĠSt rikes", + "M ilitary", + "ĠO ath", + "v acc", + "pp ings", + "05 2", + "Ġpart Name", + "amp ing", + "Rep orts", + "K I", + "CH R", + "Ġsubt ly", + "sw ers", + "Bl 
ake", + "us ual", + "Ġcontest ants", + "Ġcart ridges", + "ĠGRE AT", + "Ġbl ush", + "ĠâĢ º", + "47 2", + "Ġreason ed", + "ãĥ ¤", + "paralle led", + "Ġd yn", + "ag ate", + "Ġnight ly", + "å Ĩ", + "55 6", + "Ġsem antic", + "ĠAdv oc", + "Ġ !!", + "Ġdisag rees", + "ĠB W", + "V eh", + "Ġharm ing", + "Ġembr aces", + "Ġstri ves", + "Ġin land", + "ĠK ard", + "Ġhe ats", + "ĠGin ny", + "ut an", + "ern aut", + "yl ene", + "ĠE lev", + "J D", + "Ġh ars", + "ĠStar r", + "Ġsk ysc", + "Ġcollabor ators", + "Us ually", + "Ġrev olutions", + "ĠSTAT S", + "Ġdism antle", + "Ġconfident ly", + "Ġkin etic", + "Al i", + "Ġpercent ile", + "Ġextract ing", + "ill ian", + "est ead", + "Ġphysic ists", + "ĠMarsh al", + "Ġfell owship", + "Ġd ashed", + "ĠU R", + "ĠSi oux", + "ĠComp act", + "am ide", + "P ython", + "ĠLe igh", + "ĠPharm ac", + "ist rates", + "her ical", + "Ġf ue", + "ĠE min", + "Ġ( {", + "ĠNeighbor hood", + "Ġdisrupt ing", + "ĠD up", + "Ġg land", + "ĠSe v", + "ĠMar ian", + "arg on", + "ĠD und", + "Ġ< !--", + "Ġstr and", + "Ġstadium s", + "z os", + "Ġpsych osis", + "ĠR ack", + "Ġbrilliant ly", + "ï¸ ı", + "Ġsubmer ged", + "ĠInst it", + "ĠCh ow", + "Ġc ages", + "ĠH ats", + "ĠU rs", + "Ġdil uted", + "us at", + "ien ne", + "ĠMembers hip", + "ĠBur k", + "Ġ ie", + "Ġarche type", + "D rug", + "ult on", + "ĠSp ock", + "ĠMcK ay", + "ĠDep end", + "F eatured", + "S oc", + "19 78", + "ĠB ere", + "Ġrelent lessly", + "Ġcripp ling", + "Ġar thritis", + "çĶ Ł", + "ĠTrop ical", + "ĠBul g", + "ĠCher yl", + "Ġadm irable", + "Ġsub title", + "Over ride", + "Ġorig inating", + "ĠC CP", + "Ġsw ore", + "ĠSo le", + "ĠDis orders", + "3 29", + "Ġprocess ion", + "Ġref urb", + "Ġimm ersed", + "requ ently", + "Ġskept ics", + "Ġcer amic", + "m itter", + "en stein", + "b elt", + "ĠT IT", + "b idden", + "Ġf ir", + "m ist", + "> ]", + "Ġwe ave", + "ĠParad ox", + "Ġentr usted", + "ĠBarcl ays", + "Ġnovel ist", + "og ie", + "80 6", + "Ġnin ety", + "Ġdisag reements", + "@@@@ @@@@", + "ĠAus chwitz", + "c ars", + "ĠL ET", + "t ub", + "arant ine", + "P OS", + "Ġback story", + "Ġcheer ful", + "ĠR ag", + "ek a", + "bi ased", + "Ġinexper ienced", + "ak ra", + "ĠW itt", + "t an", + "Ġrap ist", + "Ġplate au", + "ch al", + "ĠInqu is", + "exp ression", + "Ġc ipher", + "Ġsh aving", + "add en", + "re ly", + "( \\", + "ism a", + "ĠReg ulatory", + "CH AR", + "ily n", + "N VIDIA", + "G U", + "Ġmur m", + "la us", + "Christ opher", + "Ġcontract ual", + "ĠPro xy", + "ĠJa ime", + "ĠMethod ist", + "Ġstew ards", + "st a", + "per ia", + "Ġphys iology", + "Ġbump ed", + "Ġf ructose", + "Austral ian", + "ĠMet allic", + "ĠMas querade", + "ar b", + "Ġprom ul", + "Ġdown fall", + "Ġbut cher", + "Ġb our", + "ĠIN FORMATION", + "ĠB is", + "pect s", + "ad ena", + "Ġcontempl ating", + "ar oo", + "cent ered", + "ĠPe aks", + "Us ed", + "Ġmod em", + "Ġg enders", + "Ġ8 000", + "37 1", + "Ġm aternity", + "ĠR az", + "Ġrock ing", + "Ġhandgun s", + "ĠD ACA", + "Aut om", + "ĠN ile", + "Ġtum ult", + "ĠBenef it", + "ĠAppro ach", + "works hop", + "ĠLe aving", + "G er", + "inst ead", + "Ġvibr ations", + "Ġrep ositories", + "49 7", + "ĠA unt", + "ĠJ ub", + "ĠExp edition", + "Al pha", + "Ġs ans", + "Ġoverd ue", + "Ġoverc rowd", + "Ġlegisl atures", + "Ġp aternal", + "ĠLeon ardo", + "Ġexp ressive", + "Ġdistract ions", + "Ġsil enced", + "tr ust", + "Ġb iking", + "Ġ5 60", + "Ġpropri et", + "Ġimp osition", + "Ġcon glomer", + "Ġ= ================================================================", + "ĠTe aching", + "ĠY ose", + "int ensive", + "T own", + "Ġtroll ing", + "ĠGr ac", + "ĠAS US", + "Y o", + 
"Ġspecial s", + "ĠNep h", + "ĠGod zilla", + "Dat abase", + "ĠHe gel", + "Ġ27 2", + "19 76", + "ĠGl oria", + "Ġdis emb", + "ĠInvestig ations", + "ĠB ane", + "ag ements", + "St range", + "Ġtre asury", + "ĠPl ays", + "Ġundes irable", + "Ġwid ening", + "Ġverb ally", + "Ġinf ancy", + "Ġcut ter", + "f ml", + "Ġ21 00", + "prot otype", + "f ine", + "Ġdec riminal", + "Ġdysfunction al", + "Ġbes ie", + "ĠErn st", + "z eb", + "Ġnort heastern", + "Ġa ust", + "por ate", + "ĠMar lins", + "Ġsegreg ated", + "ew orld", + "ĠMa her", + "Ġtra verse", + "Ġmon astery", + "ur gy", + "G ear", + "s and", + "Com pl", + "ĠE MP", + "Ġpl ent", + "ĠMer cer", + "Ġ27 6", + "TA BLE", + "Config uration", + "H undreds", + "Ġpr ic", + "Ġcollabor ating", + "ĠPar amount", + "ĠCumm ings", + "Ġ( <", + "Ġrecord er", + "Ġfl ats", + "Ġ4 16", + "wh ose", + "Font Size", + "ĠOr bit", + "Y R", + "Ġwr ists", + "Ġb akery", + ") }", + "ĠB ounty", + "ĠLanc aster", + "Ġend ings", + "acc ording", + "ĠSal am", + "e asy", + "75 5", + "ĠBur r", + "ĠBarn ett", + "onom ous", + "Un ion", + "Ġpreced ence", + "ĠScholars hip", + "ĠU X", + "Ġroll out", + "Ġbo on", + "al m", + "ĠCan ter", + "æ µ", + "Ġround ing", + "Ġcl ad", + "Ġv ap", + "ĠF eatured", + "is ations", + "Ġ5 40", + "pol ice", + "Ġunsett ling", + "Ġdr ifting", + "ĠLum ia", + "ĠObama Care", + "ĠF avor", + "Hy per", + "ĠRoth schild", + "ĠMil iband", + "an aly", + "ĠJul iet", + "H u", + "Ġrec alling", + "a head", + "69 6", + "Ġunf avorable", + "Ġd ances", + "O x", + "Ġleg ality", + "Ġ40 3", + "rom ancer", + "Ġinqu ire", + "ĠM oves", + "\\ \">", + "ĠVari ant", + "ĠMess iah", + "ĠL CS", + "ĠBah á", + "75 6", + "Ġeyeb row", + "Ġ ¥", + "ĠMc F", + "ĠFort y", + "M as", + "Ġpan icked", + "Ġtransform ations", + "q q", + "Ġrev olves", + "ring e", + "ĠA i", + "ax e", + "Ġon ward", + "ĠC FR", + "ĠB are", + "log in", + "Ġliqu ids", + "Ġde comp", + "second ary", + "il an", + "ĠCon vert", + "ami ya", + "Ġprosecut ing", + "Ġâī ¡", + "ĠYork ers", + "ĠByr ne", + "sl ow", + "aw ei", + "J ean", + "Ġ26 9", + "ĠSky dragon", + "Ġ é", + "ĠNicarag ua", + "ĠHuck abee", + "ĠHigh ly", + "Ġamph ib", + "ĠPast or", + "ĠL ets", + "Ġbl urred", + "Ġvisc eral", + "ĠC BO", + "Ġcollabor ated", + "z ig", + "Leg al", + "Ġapart heid", + "Ġbr id", + "Ġpres et", + "ĠD ET", + "ĠAM A", + "× Ķ", + "arch ing", + "auc uses", + "build er", + "Ġpo etic", + "Ġem ulator", + "ĠMole cular", + "Ġhon oring", + "ise um", + "Ġtract or", + "ĠCl uster", + "ĠCal m", + "ared evil", + "Ġsidew alks", + "Ġviol in", + "Ġgeneral ized", + "ĠAle c", + "Ġemb argo", + "Ġfast ball", + "ĠHT TPS", + "ĠL ack", + "ĠCh ill", + "ri ver", + "C hel", + "ĠSw arm", + "ĠLev ine", + "ro ying", + "L aunch", + "Ġkick er", + "Ġadd itive", + "ĠDe als", + "W idget", + "cont aining", + "Ġescal ate", + "ĠOP EN", + "Ġtwe aked", + "Ġst ash", + "Ġsp arks", + "ĠEs sex", + "ĠE cc", + "Ġconv ict", + "Ġblog ging", + "I ER", + "ĠH L", + "Ġmurd erers", + "75 9", + "ĠH ib", + "Ġde pl", + "ĠJ ord", + "S ac", + "Ġdis sect", + "ĠHow e", + "os her", + "Ġcustom izable", + "ĠFran z", + "Ġat ro", + "Ä ĩ", + "Ġ000 4", + "Ġout post", + "R oss", + "Ġglyph osate", + "ĠHast ings", + "ĠBE FORE", + "Ġsh ove", + "o pped", + "ĠSc ala", + "Ġam ulet", + "an ian", + "Ġexacerb ated", + "Ġe ater", + "47 1", + "UM E", + "Ġpul p", + "izont al", + "ĠZ am", + "ĠAT I", + "imm une", + "aby tes", + "Ġunnecess arily", + "ĠC AT", + "ĠAx is", + "Ġvisual ize", + "à ī", + "ĠRad ical", + "f m", + "Doc uments", + "ĠFor rest", + "Ġcontext ual", + "ĠSy mbol", + "Ġtent ative", + "ĠDO ES", + "ĠGood s", + "Ġintermitt ent", + "} 
:", + "medi ated", + "Ġridic ule", + "Ġathe ism", + "Ġpath ogens", + "ĠM um", + "Ġre introdu", + "Ġ30 7", + "i HUD", + "Ġflash light", + "Ġsw earing", + "Ġp engu", + "B u", + "Ġrot ated", + "ĠCr ane", + "Ġ() );", + "Ġfashion able", + "Ġendors ing", + "46 3", + ") [", + "Ġingest ion", + "Ġcook s", + "Ġ9 50", + "ot omy", + "ĠIm am", + "Ġk a", + "Ġte aser", + "ĠGhost s", + "ĠãĤ µ", + "19 69", + "Ï ĥ", + "ub by", + "Ġconver ter", + "zan ne", + "end e", + "ĠPre par", + "ĠNic kel", + "ĠChim era", + "h im", + "ĠTyr ann", + "ĠSabb ath", + "ĠNich ols", + "Ġra pt", + "ih ar", + "Ġshe lling", + "Ġillum inate", + "Ġdent ist", + "ut or", + "ĠInteg ration", + "Ġwh ims", + "ĠLiter ary", + "Be aut", + "Ġp archment", + "ag ara", + "Br and", + "Ġder og", + "âĢ¦ )", + "ĠNor se", + "Ġunw itting", + "Ġc uc", + "Ġborder line", + "Ġupset ting", + "Ġrec ourse", + "Ġd raped", + "ĠRad ar", + "Ġcold er", + "ĠPep si", + "im inary", + "], [", + "65 8", + "V i", + "ĠF rem", + "ĠP es", + "Ġveter inary", + "ĠT ED", + "ĠEp idem", + "n ova", + "k id", + "Ġdev out", + "o ct", + "j ad", + "M oh", + "ĠP AY", + "Ġge ometric", + "Ġ3 23", + "Ġcircum ference", + "ich ick", + "19 75", + "ĠY uri", + "ĠSh all", + "ĠH over", + "un in", + "S pr", + "Ġg raft", + "ĠHapp iness", + "Ġdisadvant ages", + "att acks", + "Ġhub s", + "ĠStar Craft", + "é ĸ", + "Ġgall eries", + "ĠKor ra", + "Ġgrocer ies", + "ĠGors uch", + "Ġrap ists", + "Ġfun gi", + "ĠTyph oon", + "V ector", + "ĠEm press", + "b attle", + "4 68", + "Ġparas ite", + "ĠBom ber", + "S G", + "ex ist", + "ĠP f", + "Ġun se", + "Ġsurge ons", + "B irth", + "ĠUn sure", + "ĠPrint ed", + "ĠBehavior al", + "ĠA ster", + "Pak istan", + "Ġun ethical", + "Ġs v", + "ĠIo T", + "Ġlay outs", + "P ain", + "Ġconst ants", + "ĠL W", + "ĠB ake", + "Ġtow els", + "Ġdeterior ation", + "ĠBol ivia", + "Ġblind ed", + "ĠW arden", + "ĠMist ress", + "Ġon stage", + "Ġcl ans", + "ĠB EST", + "19 60", + "Ġant ique", + "Ġrhet orical", + "ĠPer cy", + "ĠRw anda", + ", .", + "B ruce", + "Ġtra umat", + "ĠParliament ary", + "Ġfoot note", + "id ia", + "ĠLear ned", + "se eking", + "gen ic", + "Ġdim ensional", + "H ide", + "èĢ ħ", + "Ġintrig ue", + "in se", + "Ġle ases", + "Ġapp rentices", + "w ashing", + "Ġ19 26", + "V ILLE", + "Ġsw oop", + "s cl", + "Ġbed rooms", + "on ics", + "ĠCr unch", + "comp atible", + "Ġincap ac", + "ĠYemen i", + "ash tra", + "z hou", + "d anger", + "Ġmanifest ations", + "ĠDem ons", + "AA F", + "Secret ary", + "ACT ED", + "L OD", + "Ġam y", + "ra per", + "eth nic", + "4 17", + "Ġpos itives", + "Ġ27 3", + "ĠRefuge es", + "Ġus b", + "ĠV ald", + "odd y", + "ĠMahm oud", + "As ia", + "Ġskull s", + "ĠEx odus", + "ĠComp et", + "ĠL IC", + "ĠM ansion", + "ĠA me", + "Ġconsolid ate", + "storm s", + "ont ent", + "99 6", + "Ġcl en", + "Ġm ummy", + "fl at", + "75 8", + "ĠV OL", + "oter ic", + "n en", + "ĠMin ute", + "S ov", + "Ġfin er", + "R h", + "ly cer", + "Ġreinforce ments", + "ĠJohann es", + "ĠGall agher", + "Ġgym n", + "S uddenly", + "Ġext ortion", + "k r", + "i ator", + "T a", + "Ġhippocamp us", + "N PR", + "ĠComput ing", + "Ġsquare ly", + "Ġmod elling", + "ĠFor ums", + "ĠL isp", + "ĠKrish na", + "Ġ3 24", + "Ġr ushes", + "Ġens ued", + "Ġcre eping", + "on te", + "n ai", + "il ater", + "ĠHorn ets", + "Ġob livious", + "IN ST", + "55 9", + "Ġjeopard y", + "Ġdistingu ishing", + "j ured", + "Ġbeg s", + "sim ilar", + "ph ot", + "5 30", + "ĠPark way", + "Ġs inks", + "ĠHearth stone", + "ib ur", + "ĠBat on", + "Av oid", + "Ġd ancer", + "Ġmag istrate", + "ary n", + "Ġdisturb ances", + "ĠRom ero", + "Ġpar aph", + "Ġmis 
chief", + "âĸ ĵ", + "ĠSh aria", + "Ġur inary", + "r oute", + "iv as", + "f itted", + "Ġeject ed", + "ĠAl buquerque", + "Ġ4 70", + "Ġirrit ated", + "ĠZ ip", + "ĠB iol", + "à į", + "Ġden ounce", + "Ġbin aries", + "ĠVer se", + "Ġopp os", + "ĠKend rick", + "ĠG PL", + "Ġsp ew", + "ĠEl ijah", + "ĠE as", + "Ġdr ifted", + "so far", + "Ġannoy ance", + "ĠB ET", + "47 4", + "ĠSt rongh", + "it ates", + "ĠCogn itive", + "oph one", + "ĠIdent ification", + "ocr ine", + "connect ion", + "Ġbox er", + "ĠAS D", + "ĠAre as", + "Y ang", + "t ch", + "ull ah", + "Ġdece ive", + "Comb at", + "ep isode", + "cre te", + "W itness", + "Ġcondol ences", + "ht ar", + "Ġhe als", + "Ġbuck ets", + "ĠLA W", + "B lu", + "Ġsl ab", + "ĠOR DER", + "oc l", + "att on", + "ĠSteven son", + "ĠG inger", + "ĠFriend ly", + "ĠVander bilt", + "sp irit", + "ig l", + "ĠReg arding", + "ĠPR OG", + "Ġse aling", + "start ing", + "Ġcard inal", + "ĠV ec", + "ĠBe ir", + "Ġmillisec onds", + "we ak", + "per se", + "Ġster ile", + "ĠCont emporary", + "ĠPh ant", + "ĠCl o", + "Ġout p", + "Ġex iled", + "Ġ27 7", + "Ġself ie", + "Ġman ic", + "Ġn ano", + "ter ms", + "Alex ander", + "Ġres olves", + "Ġmillenn ia", + "Ġexpl odes", + "Ġconst ellation", + "Ġadul tery", + "m otion", + "D OC", + "Ġbroad casters", + "Ġkinderg arten", + "ĠMay weather", + "ĠE co", + "ich o", + "Ġ28 7", + "l aun", + "Ġm ute", + "Ġdisc reet", + "Ġpres chool", + "Ġpre empt", + "De lete", + "ĠFre ed", + "P i", + "H K", + "Ġblock er", + "ĠC umber", + "Ġw rought", + "d ating", + "Ġins urer", + "Ġquot as", + "Ġpre ached", + "Ġev iction", + "ĠReg ina", + "ĠP ens", + "Ġsevent een", + "ĠN ass", + "D ick", + "Ġfold s", + "Ġd otted", + "ĠA ad", + "Un iversal", + "Ġp izz", + "ĠG uru", + "Ġso ils", + "Ġno vice", + "ĠNe ander", + "Ġst ool", + "Ġdeton ated", + "ĠPik achu", + "ĠMass ive", + "IV ER", + "ĠAb del", + "Ġsubdu ed", + "Ġtall est", + "Ġprec arious", + "Ġa y", + "r ification", + "ĠOb j", + "c ale", + "Ġun question", + "cul osis", + "ad as", + "igr ated", + "D ays", + "Ġque ens", + "ĠGaz ette", + "ĠCol our", + "ĠBow man", + "ĠJ J", + "ï ve", + "Ġdomin ates", + "Stud ent", + "Ġm u", + "Ġback log", + "ĠElect ro", + "Tr uth", + "48 3", + "Ġcond ensed", + "r ules", + "ĠCons piracy", + "Ġacron ym", + "hand led", + "ĠMat te", + "j ri", + "ĠImp ossible", + "l ude", + "cre ation", + "Ġwar med", + "ĠSl ave", + "Ġmis led", + "Ġfer ment", + "ĠK ah", + "ink i", + "ke leton", + "cy l", + "ĠKar in", + "Hun ter", + "Reg ister", + "ĠSur rey", + "Ġst ares", + "ĠW idth", + "ĠN ay", + "ĠSk i", + "Ġblack list", + "uck et", + "Ġexp ulsion", + "im et", + "Ġret weet", + "vant age", + "Fe ature", + "Ġtro opers", + "Ġhom ers", + "9 69", + "Ġconting ency", + "ĠW TC", + "ĠBrew er", + "fore ign", + "W are", + "S olar", + "Ġund ue", + "RE C", + "ulner able", + "path ic", + "ĠBo ise", + "Ġ3 22", + "Ġarous ed", + "ĠY ing", + "ä¸ į", + "uel ess", + "Ġp as", + "Ġmor p", + "Ġfl oral", + "Ex press", + "ud ging", + "k B", + "ĠGr anted", + "Ø ¯", + "ĠMich a", + "ĠGoth ic", + "ĠSPEC IAL", + "ĠRic ardo", + "F ran", + "Ġadminister ing", + "6 20", + "por a", + "Ġ ®", + "Ġcomprom ises", + "Ġb itten", + "Ac cept", + "Th irty", + "Ð ²", + "Ġmater ially", + "ĠTer r", + "ig matic", + "ch ains", + "Ġdo ve", + "stad t", + "Mar vel", + "FA ULT", + "Ġwind shield", + "Ġ3 36", + "ad ier", + "Ġsw apping", + "Ġflaw less", + "ĠPred ator", + "ĠMiche le", + "Ġprop ulsion", + "ĠPsych ic", + "Ġassign ing", + "Ġfabric ation", + "Ġbar ley", + "l ust", + "Ġtow ering", + "Ġalter cation", + "ĠBent ley", + "Sp here", + "Ġtun a", + "ĠClass es", + "Fre 
edom", + "un er", + "L ady", + "v oice", + "Ġcool est", + "or r", + "Ġpal p", + "$ {", + "Ġhyster ia", + "ĠMet atron", + "p ants", + "Ġspawn ing", + "Exper ts", + "ĠInvest ors", + "ĠAn archy", + "Ġshr unk", + "ĠVict im", + "Ġ28 9", + "Ġec stasy", + "ĠB inding", + "58 5", + "ĠMel ody", + "57 8", + "ot ally", + "ĠE tsy", + "lig a", + "Ġapplaud ed", + "Ġswe ating", + "Ġredist ributed", + "Ġpop corn", + "Ġsem inal", + "f ur", + "ĠNeuro science", + "R and", + "ĠO st", + "ĠMadd en", + "ĠIncre asing", + "ĠDaw kins", + "ĠSub way", + "Ġar sen", + "cons erv", + "B UR", + "Ġsp iked", + "ĠLy ft", + "ĠImper ium", + "ĠDrop box", + "Ġfav oured", + "Ġencomp asses", + "gh ost", + "Ġins pires", + "Ġbur geoning", + "ĠY oshi", + "ĠVert ical", + "ĠAud itor", + "Ġint ending", + "Ġfilib uster", + "Bl oom", + "f ac", + "ĠCav s", + "ign ing", + "Ġcowork ers", + "ĠBarb arian", + "rem ember", + "FL AG", + "Ġaudit ory", + "ason ry", + "Col lege", + "Ġmut ed", + "gem ony", + "ob in", + "ĠPsych o", + "9 68", + "Ġlav ish", + "Ġhierarch ical", + "ĠDr one", + "ou k", + "Ġcripp led", + "ĠMax im", + "Sl ot", + "Ġqu iz", + "ĠV id", + "if ling", + "Ġarchae ologists", + "Ġabandon ment", + "d ial", + "le on", + "ĠF as", + "T ed", + "Ġr aspberry", + "Ġmaneu vers", + "Ġbehavi ours", + "Ġins ure", + "Ġrem od", + "Sw itch", + "h oe", + "Ġsp aced", + "Ġafford ability", + "ĠF ern", + "not ation", + "ĠBal anced", + "Ġoccup ies", + "en vironment", + "Ġneck lace", + "Ġsed an", + "F U", + "ĠBrav o", + "Ġab users", + "ĠAn ita", + "met adata", + "ĠG ithub", + "ait o", + "ĠF aster", + "ĠWass erman", + "ĠF lesh", + "Ġth orn", + "r arily", + "ĠMer ry", + "w ine", + "Ġpopul ace", + "ĠL ann", + "Ġrepair ing", + "Ġpsy che", + "Ġmod ulation", + "aw aru", + "âĢĭ âĢĭ", + "ari j", + "Ġdecor ations", + "Ġapolog ise", + "ĠG arg", + "app ly", + "Ġgive away", + "ĠFl an", + "ĠWy att", + "U ber", + "Ġauthor ised", + "ĠMor al", + "HAHA HAHA", + "activ ate", + "Ġtorped o", + "ĠF AR", + "Ġam assed", + "ĠA ram", + "ark in", + "ĠVict ims", + "st ab", + "Ġo m", + "ĠE CO", + "Ġopio ids", + "Ġpurpose ly", + "ĠV est", + "Ġer g", + "at an", + "ĠSur gery", + "Ġcorrect ing", + "ĠOrt iz", + "ĠBe et", + "Ġrev oke", + "Ġfre eway", + "ĠH iggins", + "F ail", + "ĠFar ms", + "ĠAT P", + "h ound", + "Ġp oking", + "ĠCommun ists", + "mon ster", + "iment ary", + "Ġunlock ing", + "Ġunf it", + "we ed", + "en ario", + "at ical", + "ĠEnlight enment", + "ĠN G", + "ĠComp ensation", + "de en", + "ĠWid ow", + "ĠCind y", + "ĠAfter wards", + "Ġ6 000", + "ikh ail", + "ag ically", + "Ġrat ified", + "Ġcasual ty", + "H OME", + "p sey", + "f ee", + "Ġspark ling", + "Ġd é", + "Ġconcert ed", + "C atal", + "Ġcomp lying", + "ĠA res", + "ĠD ent", + "Sh ut", + "Ġsk im", + "ad minist", + "Ġhost ilities", + "ĠG ins", + "Ġ6 08", + "Ġm uddy", + "ĠMc Int", + "ĠDec ay", + "5 25", + "Ġconspic uous", + "ĠEx posure", + "Ġresc ind", + "Ġwear able", + "Ġ3 28", + "our met", + "ah s", + "ĠRob ots", + "Ġe clips", + "inst ance", + "ĠRE PORT", + "ĠApp l", + "0 30", + "ĠSk ies", + "01 00", + "Ġfall acy", + "S ocket", + "ĠRece iver", + "Ġsol ves", + "ĠButter fly", + "ĠSho pping", + "ĠFI RE", + "65 4", + "Med ic", + "Ġsing ers", + "ĠNeed less", + "'' ''", + "isher s", + "ĠD ive", + "58 8", + "Ġselect ively", + "Ġcl umsy", + "88 9", + "Ġpurch aser", + "ear ned", + "ard y", + "Ġbenef iting", + "eng lish", + "Ġyield ing", + "ĠP our", + "Ġspin ach", + "Ġdel ve", + "ĠC rom", + "6 10", + "Ġexport ing", + "ĠMA KE", + "Ġ26 3", + "Ġg rop", + "Ġenv oy", + "ĠInqu iry", + "ĠLu igi", + "d ry", + "ĠT uring", + "Thumbnail Image", + 
"ĠVar iety", + "Ġfac et", + "Ġfl uffy", + "Ġexcerpt s", + "Ġsh orth", + "ĠOl sen", + "CL UD", + "Ġrel iant", + "ĠUN C", + "T our", + "Ġbat hing", + "Comp any", + "Ġglobal ization", + "P red", + "ĠMalf oy", + "Ġh oc", + "j am", + "craft ed", + "ĠBond s", + "ĠKiss inger", + "Eng land", + "Ġorder ly", + "cat entry", + "Ġ26 1", + "Ġexch anging", + "ĠInt ent", + "ĠAmend ments", + "D OM", + "Ġst out", + "³³³³³³³³ ³³³³³³³³", + "ĠAir bus", + "Ġ27 8", + "hy de", + "P oll", + "Item ThumbnailImage", + "Ġlooph oles", + "ĠPill ar", + "Ġexpl or", + "St retch", + "A part", + "Ġun married", + "Lim it", + "ĠTransform ers", + "Ġintellect ually", + "unct ure", + "18 00", + "Ġd arn", + "B razil", + "Ġleft over", + "ber us", + "f red", + "Mine craft", + "3 26", + "ĠForm s", + "Ġproof s", + "ĠDes igned", + "Ġindex es", + "ĠSupp ose", + "EM S", + "ĠL oving", + "ĠBon nie", + "im ating", + "OT US", + "Ġconduct or", + "Ġbehav ed", + "ĠF ren", + "Ġsy nerg", + "Ġmillenn ium", + "Ġcater ing", + "ĠL auder", + "W r", + "ĠY iannopoulos", + "ĠAT F", + "Ġensl aved", + "Ġawaken ed", + "D VD", + "ĠED ITION", + "ĠConc ert", + "ĠChall enger", + "ĠH aku", + "umer ic", + "Ġdep recated", + "ĠSH AR", + "4 12", + "Ġdy stop", + "Ġtremb ling", + "Ġdread ed", + "ĠSp ac", + "p adding", + "Re pl", + "ĠG arrison", + "M ini", + "Ġun paralleled", + "am ar", + "URR ENT", + "w reck", + "c ertain", + "t al", + "ĠC LS", + "app ings", + "Ġsens ed", + "Ġf encing", + "ĠPas o", + "ĠDes k", + "Ġsc off", + "Ġcontem plate", + "ĠL iga", + "l iquid", + "75 7", + "Ġapp rentice", + "ĠUCH IJ", + "5 70", + "ĠTh ousand", + "ĠIll um", + "Ġchampion ed", + "ãĤ Į", + "Ġelect ors", + "Ġ3 98", + "ĠH ancock", + "round ed", + "ĠJ OHN", + "Ġuns atisf", + "Ġqual ifier", + "ĠGad get", + "EN E", + "Ġdead liest", + "ĠPl ants", + "Ġ ions", + "Ġacc ents", + "Ġtwe aking", + "Ġsh aved", + "F REE", + "ĠCh aser", + "Again st", + "9 60", + "Ġmeth amphetamine", + "Ġnormal ized", + "Ġ$ \\", + "ĠPre cision", + "ĠGu am", + "Ġch oked", + "ĠX II", + "ĠCast ing", + "Tor rent", + "Ġscal p", + "ĠJagu ar", + "w it", + "Ġsem ic", + "ix ie", + "ĠG ould", + "Ġconf ines", + "N usra", + "ĠL on", + "ĠJ ugg", + "y cle", + "ĠCod ec", + "E gypt", + "Ġrest rain", + "ĠAl iens", + "Ġch oking", + "ĠD unk", + "ĠBell a", + "ab c", + "Ġsl ang", + "Ġneuro trans", + "s av", + "Ġempower ment", + "â ĨĴ", + "Ġclim bers", + "ĠM im", + "ĠF ra", + "ros se", + "Cap ital", + "ĠCth ulhu", + "Inter face", + "Ġprof icient", + "ĠIN TO", + "Ġ3 18", + "ront al", + "5 80", + "ĠDes pair", + "K enn", + "Ġscrim mage", + "ĠCo at", + "as ions", + "Ġwall paper", + "ĠJ ol", + "Ġresurg ence", + "Ġant iv", + "ĠB alls", + "² ¾", + "Ġbuff ers", + "Ġsub system", + "ĠSt ellar", + "ĠL ung", + "A IDS", + "Ġerad icate", + "Ġblat antly", + "Ġbehav es", + "ĠN un", + "Ġant ics", + "ex port", + "DE V", + "w b", + "Ġph p", + "ĠInteg rity", + "Ġexplore r", + "Ġrev olving", + "auth ored", + "g ans", + "Ġbas k", + "Ġas ynchronous", + "å į", + "TH ING", + "69 8", + "G ene", + "ĠR acer", + "ĠN ico", + "iss ued", + "Ġser mon", + "p ossibly", + "Ġsize of", + "Ġentrepreneur ial", + "ox in", + "ĠMin erva", + "Ġpl atoon", + "n os", + "ri ks", + "A UT", + "ĠAval anche", + "ĠDes c", + "ij 士", + "ĠP oc", + "Ġconf erred", + "Î »", + "Ġpat ched", + "F BI", + "66 2", + "Ġfract ures", + "Ġdetect s", + "Ġded icate", + "Ġconstitu ent", + "Ġcos mos", + "W T", + "Ġswe ats", + "Ġspr ung", + "b ara", + "s olid", + "Ġuns us", + "Ġbul ky", + "ĠPhilipp e", + "ĠFen rir", + "Ġtherap ists", + "ore al", + "^^ ^^", + "Ġtotal ed", + "Ġboo ze", + "ĠR PC", + "Prosecut 
ors", + "Ġdis eng", + "ĠSh ared", + "Ġmotor cycles", + "Ġinvent ions", + "Ġlett uce", + "ĠMer ge", + "ĠJ C", + "Ġspiritual ity", + "ĠWAR NING", + "Ġunl ucky", + "ĠT ess", + "Ġtong ues", + "ĠD UI", + "T umblr", + "Ġle ans", + "Ġinv aders", + "Ġcan opy", + "ĠHur ricanes", + "ĠB ret", + "ĠAP PLIC", + "id ine", + "ick le", + "Reg arding", + "Ġve ggies", + "Ġe jac", + "ju ven", + "F ish", + "D EM", + "ĠD ino", + "Th row", + "ĠCheck ing", + "be ard", + "( &", + "Ġj ails", + "Ġh r", + "trans fer", + "iv ating", + "Ġfle ets", + "ĠIm ag", + "ĠMc Donnell", + "Ġsnipp et", + "Is a", + "ĠCh att", + "ĠSt ain", + "ĠSet FontSize", + "ĠO y", + "ĠMathemat ics", + "49 4", + "Ġelectro ly", + "ĠG ott", + "ĠBr as", + "B OOK", + "ĠF inger", + "d ump", + "Ġmut ants", + "Ġrent als", + "Ġinter tw", + "Ġc reek", + "ail a", + "Bro ther", + "ĠDisc ord", + "pe e", + "raw ler", + "Ġcar p", + "Ġ27 9", + "ãĤ· ãĥ£", + "rel ations", + "Ġcontr asts", + "Col umn", + "Ġrec onnaissance", + "Ġun know", + "Ġl ooting", + "Ġregul ates", + "Ġopt imum", + "ĠChero kee", + "ĠA ry", + "Lat est", + "Ġroad side", + "Ġd anced", + "ĠUnic orn", + "A cknowled", + "Ġuncont roll", + "ĠM US", + "at io", + "ch ance", + "ha ven", + "VAL UE", + "Ġfavour ites", + "Ġceremon ial", + "b inary", + "pe ed", + "wood s", + "EM P", + "Ġv ascular", + "Ġcontempl ated", + "Ġbar ren", + "ĠL IST", + "Y ellow", + "ospons ors", + "Ġwhisk y", + "ĠM amm", + "ĠDeV os", + "min imum", + "H ung", + "44 2", + "P ic", + "ĠSnap dragon", + "77 6", + "Ġcar ving", + "Ġund ecided", + "Ġadvantage ous", + "Ġpal ms", + "ĠA Q", + "Ġst arch", + "L oop", + "Ġpadd le", + "Ġfl aming", + "ĠHor izons", + "An imation", + "bo ost", + "Ġprob abilities", + "ĠM ish", + "Ġex odus", + "ĠEditor ial", + "Ġfung us", + "Ġdissent ing", + "ĠDel icious", + "rog ram", + "ĠD yn", + "d isk", + "t om", + "Ġfab rics", + "ĠC ove", + "ĠB ans", + "Ġsoft en", + "ĠCON S", + "Ġin eligible", + "Ġestim ating", + "ĠLex ington", + "pract ice", + "of i", + "Ġshe dding", + "ĠN ope", + "Ġbreat hed", + "ĠCorinth ians", + "y ne", + "ek i", + "B ull", + "Ġatt aching", + "reens hots", + "Ġanaly se", + "ĠK appa", + "Ġuns ustainable", + "Ġinter pol", + "ank y", + "he mer", + "Ġprot agonists", + "Ġform atted", + "ĠBry ce", + "ĠAch illes", + "ĠAb edin", + "sh ock", + "Ġb um", + "b os", + "qu a", + "ĠW arn", + "q t", + "ĠDi abetes", + "8 64", + "ĠIn visible", + "Ġvan ish", + "Ġtrans mitting", + "Ġmur ky", + "ĠFe i", + "Ġawa ited", + "ĠJur assic", + "umm ies", + "Ġmen acing", + "g all", + "C ath", + "B uilt", + "ild o", + "ĠV otes", + "Ġon t", + "Ġmun itions", + "ĠFre em", + "ÃŃ n", + "Ġdec ency", + "lo pp", + "ie ved", + "ĠG ord", + "Ġun thinkable", + "ĠNews week", + "Ġ3 21", + "He at", + "Ġpresent er", + "ji ang", + "Ġpl ank", + "ĠAval on", + "Ġben z", + "ĠR out", + "Ġslam ming", + "ĠD ai", + "ou ter", + "ĠCook ie", + "ĠAlic ia", + "ge y", + "Ġvan ity", + "Ġow l", + "á µ", + "t ested", + "ĠAw akens", + "Ġcan v", + "Ġblind ly", + "ĠRid ley", + "ĠEm ails", + "Requ ires", + "ĠSer bian", + "ograp hed", + "if rame", + "eter ia", + "Ġaltern ating", + "qu iet", + "Ġsoc iology", + "ĠUn lock", + "ĠCommun ism", + "Ġo ps", + "Ġatt ribution", + "Ġab duction", + "ĠAb ram", + "Ġsidel ined", + "ĠB OOK", + "Ġref ining", + "ĠFe eling", + "ĠOs lo", + "ĠPru itt", + "r ack", + "ang ible", + "Ġcaut iously", + "ĠM ARK", + "eed s", + "M ouse", + "ĠStep h", + "ĠP air", + "S ab", + "99 7", + "ĠBa al", + "B ec", + "Ġcomm a", + "ĠP all", + "ĠG ael", + "Ġmisunder stand", + "ĠP esh", + "Order able", + "Ġdis mal", + "ĠSh iny", + "% \"", + "Ġreal 
istically", + "Ġpat io", + "ĠG w", + "ĠVirt ue", + "Ġexhaust ing", + "wh atever", + "oph ys", + "y ip", + "4 18", + "Ad just", + "ĠWa iting", + "ess on", + "ĠMaz da", + "ĠDo zens", + "Ġstream lined", + "Ġincompet ence", + "ĠM eth", + "Ġeth os", + "ON ES", + "Ġincent iv", + "Ġgr itty", + "ĠBut cher", + "Head er", + "Ġexp onential", + "à Ł", + "Ġcorrel ate", + "Ġcons ensual", + "s ounding", + "R ing", + "Orig in", + "Ġcon clusive", + "fe et", + "ac ly", + "ĠF ernandez", + "Buy able", + "Ġd ucks", + "aunt lets", + "Ġel ong", + "Ġ28 6", + "Ġsim ul", + "G as", + "ĠK irst", + "Ġprot r", + "ĠRob o", + "ĠAo E", + "op ol", + "Ġpsych ologically", + "sp in", + "ilater ally", + "ĠCon rad", + "W ave", + "44 1", + "ĠAd vertisement", + "ĠHarm on", + "ĠOri ental", + "is Special", + "Ġpresum ptive", + "Ġw il", + "ĠK ier", + "ne a", + "Ġp pm", + "Ġhar bour", + "ĠW ired", + "comp any", + "Ġcor oner", + "atur days", + "ĠP roud", + "ĠN EXT", + "ĠFl ake", + "val ued", + "ce iver", + "Ġfra ught", + "Ġc asing", + "Ġrun away", + "Ġg in", + "ĠLaure nt", + "ĠHar lem", + "ĠCur iosity", + "qu ished", + "Ġneuro science", + "ĠH ulu", + "Ġborrow er", + "Ġpetition er", + "ĠCo oldown", + "W ARD", + "Ġinv oking", + "conf idence", + "For ward", + "Ġst s", + "pop ulation", + "Delivery Date", + "Fil m", + "ĠC ov", + "quick Ship", + "quickShip Available", + "prim ary", + "isSpecial Orderable", + "inventory Quantity", + "channel Availability", + "BO X", + "ĠMulti player", + "ĠJen ner", + "77 8", + "ĠM d", + "Ġ~ /.", + "M N", + "Ġchild ish", + "Ġantioxid ant", + "ĠChrom ebook", + "Ġ27 4", + "Ġscreen play", + "Ġadvent urous", + "ĠRelations hip", + "respons ive", + "ming ton", + "Ġcorner stone", + "ĠF ey", + "F IR", + "Ġrook ies", + "ĠF eaturing", + "Ġorig inate", + "Ġelectro des", + "ant es", + "Ġscript ures", + "Ġgl ued", + "Ġdiscont ent", + "Ġaff licted", + "lay out", + "B rave", + "Ġm osa", + "ĠQuant ity", + "ĠH ik", + "w inner", + "H ours", + "Ġent ail", + "ĠCell s", + "olog ue", + "Ġv il", + "Ġpre acher", + "Ġdecor ative", + "d ifferent", + "Ġprejud ices", + "ĠSm oking", + "ĠNotting ham", + "so Type", + "Ġrhyth ms", + "ĠAl ph", + "bl ast", + "Ste el", + "ĠDaniel le", + "Ġstr ife", + "Ġrem atch", + "so DeliveryDate", + "ĠF ork", + "t rip", + "ol ulu", + "hes es", + "C G", + "ĠPOLIT ICO", + "ost a", + "ĠDr ift", + "é¾įå ¥", + "é¾įå¥ ij士", + "Ġvet ting", + "ĠJin ping", + "ĠRec ession", + "Min or", + "ĠF raud", + "enf ranch", + "Ġconven ed", + "ĠNA ACP", + "ĠMill ions", + "ĠFarm ing", + "ĠW oo", + "ĠFl are", + "rit o", + "imm igrant", + "Ġvac ancy", + "ĠHE AD", + "ĠV aj", + "eg al", + "ĠV igil", + "Stud y", + "Ġru ining", + "Ġr acks", + "Ġhe ater", + "ĠRand olph", + "ĠBr ush", + "ĠT ir", + "Ø ¨", + "Ġc ov", + "% ]", + "Ġrecount s", + "ĠO PT", + "ĠM elt", + "Ġtr uce", + "Ġcas inos", + "Ġcrus ade", + "Ġcarn age", + "Ġstri pe", + "ĠK yl", + "Text ures", + "Ġ6 98", + "Ġpro clamation", + "Ġgood ies", + "Ġ........ 
..", + "pro claimed", + "P olit", + "Ġtop ical", + "Ġspecial ize", + "ĠA min", + "g m", + "Ġanch ored", + "Ġbear ings", + "s ample", + "ĠHigh land", + "ĠAut ism", + "Ġmerc enary", + "Ġinterview er", + "L ER", + "ĠSom ers", + "Ġembry o", + "ĠAss y", + "Ġ28 1", + "ĠEd iting", + "ĠCh osen", + "6 60", + "Ġp ci", + "ĠThunder bolt", + "BI LL", + "Ġchuck led", + "jri wal", + "h of", + "Ġearth ly", + "() {", + "ind ependence", + "Ġdisp ers", + "ĠV endor", + "ĠG areth", + "Ġp als", + "P enn", + "ĠSub mit", + "ic um", + "Th u", + "Ġcl andestine", + "Ġcann ibal", + "ĠCl erk", + "E Stream", + "gal itarian", + "âĻ ¥", + "g ew", + "Ġhor rend", + "ĠL ov", + "ĠRe action", + "ocr in", + "Class ic", + "Ġecho ing", + "Ġdiscl osing", + "ĠIns ight", + "og un", + "ĠInc arn", + "upload s", + "pp erc", + "guy en", + "Ġ19 01", + "ĠB ars", + "68 7", + "Ġb ribes", + "ĠFres no", + "ur at", + "ĠRe ese", + "Ġintr usive", + "Ġgri pping", + "ĠBlue print", + "ĠR asm", + "un ia", + "man aged", + "ĠHeb do", + "Ġ3 45", + "Ġdec oding", + "Ġpo ets", + "Ġj aws", + "ĠF IGHT", + "am eless", + "ĠMead ows", + "ĠHar baugh", + "Inter view", + "ĠH osp", + "ĠB RA", + "Ġdelet ion", + "m ob", + "W alker", + "ĠMoon light", + "ĠJ ed", + "ĠSoph ia", + "Ġus ur", + "Ġfortun ately", + "ĠPut ting", + "ĠF old", + "Ġsan itation", + "Ġpart isans", + "IS ON", + "B ow", + "ĠCON C", + "ĠRed uced", + "ĠS utton", + "Ġtouch screen", + "Ġembry os", + "âĢ¢âĢ¢ âĢ¢âĢ¢", + "ĠK rug", + "com bat", + "ĠPet roleum", + "Ġam d", + "ĠCos mos", + "Ġpresc ribing", + "Ġconform ity", + "ours es", + "Ġplent iful", + "Ġdis illusion", + "ĠEc ology", + "itt al", + "Ġf anc", + "Ġassass inated", + "regn ancy", + "Ġperenn ial", + "ĠBul lets", + "Ġst ale", + "Ġc ached", + "ĠJud ith", + "ĠDise ases", + "All en", + "Ġl as", + "Ġsh ards", + "ĠSu arez", + "ĠFriend ship", + "inter face", + "ĠSupp orters", + "add ons", + "46 2", + "ĠIm ran", + "ĠW im", + "Ġnew found", + "ĠM b", + "An imal", + "Ġd arling", + "and e", + "Ġrh y", + "ĠTw isted", + "pos al", + "yn ski", + "Var ious", + "× ľ", + "ĠK iw", + "uy omi", + "Ġwell being", + "ĠL au", + "an os", + "Ġunm ist", + "Ġmac OS", + "Ġrest room", + "ĠOl iv", + "ĠAir ways", + "Ġtimet able", + "9 80", + "Ġrad ios", + "v oy", + "ias co", + "Ġcloud y", + "ĠDraw ing", + "Any thing", + "Sy ria", + "ĠH ert", + "st aking", + "Ġun checked", + "Ġb razen", + "ĠN RS", + "69 7", + "onom ic", + "est ablish", + "Ġl eng", + "Ġdi agonal", + "ĠF ior", + "L air", + "ĠSt ard", + "Ġdef icient", + "jo ining", + "be am", + "Ġomn ip", + "Ġbl ender", + "Ġsun rise", + "Mo ore", + "ĠF ault", + "ĠCost ume", + "ĠM ub", + "Fl ags", + "an se", + "Ġpay out", + "ĠGovern ors", + "ĠD illon", + "ĠBan ana", + "N ar", + "Ġtra iled", + "Ġimperial ist", + "um ann", + "ats uki", + "4 35", + "ĠRoad s", + "Ġsl ur", + "ĠIde ally", + "Ġt renches", + "C trl", + "Ġmir rored", + "ĠZ el", + "ĠC rest", + "Comp at", + "ĠRoll s", + "sc rib", + "ĠTra ils", + "omet ers", + "w inter", + "Ġimm ortality", + "il ated", + "Ġcontrad icts", + "un iversal", + "ill ions", + "ĠM ama", + "opt im", + "AT URE", + "Ġge o", + "et ter", + "ĠCar lo", + "4 24", + "Ġcanon ical", + "ĠStrongh old", + "n ear", + "Ġperf ume", + "Ġorche stra", + "od iac", + "Ġup he", + "Ġreign ing", + "vers ive", + "Ġc aucuses", + "ĠD EM", + "Ġinsult ed", + "Ġ---- --", + "ĠCr ush", + "Ġroot ing", + "ĠWra ith", + "Ġwh ore", + "Ġto fu", + "C md", + "ĠB ree", + "Ġ$ _", + "Ġr ive", + "ĠAd vertising", + "Ġw att", + "ĠH O", + "Ġpersu asive", + "ĠParam eters", + "Ġobserv ational", + "ĠN CT", + "ĠMo j", + "ĠSal on", + "Ġtr unc", + "Ġexqu 
isite", + "ĠMar a", + "Ġpo op", + "ĠAN N", + "Ex c", + "ĠWonder ful", + "ĠT aco", + "Ġhome owner", + "ĠSmith sonian", + "orpor ated", + "mm mm", + "Ġlo af", + "ĠYam ato", + "ĠInd o", + "Ġcl inging", + "á s", + "Ġimm utable", + "h ub", + "Or ange", + "Ġfingert ips", + "ĠWood en", + "ĠK idd", + "ĠJ PM", + "ĠDam n", + "C ow", + "c odes", + "48 2", + "Ġiniti ating", + "ĠEl k", + "ĠCut ting", + "Ġabsent ee", + "ĠV ance", + "ĠLil ith", + "G UI", + "Ġobsc ured", + "Ġdwar ves", + "ĠCh op", + "ĠB oko", + "Val ues", + "Ġmult imedia", + "Ġbrew ed", + "Reg ular", + "CRIP TION", + "ĠMort al", + "Ġa pex", + "Ġtravel er", + "Ġbo ils", + "Ġspray ing", + "Rep resent", + "ĠStars hip", + "4 28", + "Ġdisappro val", + "Ġshadow y", + "Ġlament ed", + "ĠRe place", + "ĠFran ç", + "67 7", + "d or", + "Ġunst oppable", + "Ġcoh orts", + "gy n", + "ĠClass ics", + "ĠAm ph", + "Ġsl uggish", + "ĠAdd iction", + "ĠPad res", + "Ġins cription", + "Ġin human", + "min us", + "ĠJere miah", + "at ars", + "Ter ror", + "ĠT os", + "ĠSh arma", + "ast a", + "c atch", + "Ġpl umbing", + "ĠTim bers", + "Sh ar", + "H al", + "ĠO sc", + "Ġcou pling", + "hum ans", + "Ġsp onge", + "Ġid ols", + "ĠSp a", + "ĠAdv ocate", + "ĠBe ats", + "lu a", + "Ġtick ing", + "Ġload er", + "ĠG ron", + "8 10", + "Ġstim ulated", + "Ġside bar", + "ĠManufact urer", + "ore And", + "19 73", + "Ġpra ises", + "ĠFl ores", + "dis able", + "ĠElect rical", + "ra ise", + "E th", + "Ġmigr ated", + "Ġlect urer", + "K ids", + "ĠCa vern", + "Ġk ettle", + "Ġgly c", + "ĠMand ela", + "ĠF ully", + "å§ «", + "FIN EST", + "Ġsquee zing", + "ĠRy der", + "amp oo", + "oreAnd Online", + "Inst oreAndOnline", + "Buyable InstoreAndOnline", + "Ġcommem orate", + "ĠRamp age", + "Aust in", + "ĠSh roud", + "ĠRu ins", + "9 15", + "ĠK H", + "Ġwater front", + "ĠE SC", + "b aby", + "ĠC out", + "ĠEm blem", + "Ġequival ents", + "49 2", + "Un ique", + "ĠNiet zsche", + "brow ser", + "Ġim itation", + "ĠWere wolf", + "ĠKir in", + "ac as", + "' ,\"", + "Ġà ¾", + "Review ed", + "Ġc unt", + "Ġvo ic", + "ĠLen ovo", + "Ġbond ed", + "48 1", + "Ġinhib itors", + "Ġendeav ors", + "ĠHav ana", + "ĠSt out", + "ĠJ olly", + "A ctor", + "*/ (", + "Ġoccur rences", + "ĠT ens", + "Incre ased", + "ĠACT ION", + "Ġ ãĢĮ", + "ĠRank ings", + "ĠB reat", + "Ġ30 9", + "D ou", + "Ġimpact ing", + "ĠDuc hess", + "pre fix", + "Q B", + "Ġsummon ing", + "Ġbest owed", + "ĠKe pler", + "ĠPOW ER", + "c ube", + "ĠK its", + "ĠG rip", + "Ġop ium", + "Ġrep utable", + "t oc", + "ich ael", + "ĠR ipple", + "Ġcaf é", + "ĠZ oom", + "ĠBur ma", + "Ġwa ive", + "Ġst alls", + "Ġdem eanor", + "inc erity", + "Ġfluor ide", + "ĠSH OULD", + "Par is", + "Ġlong ing", + "Ġpl at", + "Ġgross ly", + "Ġbull s", + "Ġshowc asing", + "ex pected", + "ĠG addafi", + "engine ering", + "Re peat", + "ĠK ut", + "Ġconce ivable", + "Ġtrim med", + "osc ope", + "ĠCand idate", + "ĠT ears", + "rol og", + "Lew is", + "S UP", + "Ġroad map", + "Ġsal iva", + "Ġtrump et", + "Jim my", + "Ġmirac ulous", + "Ġcolon ization", + "Ġam put", + "ĠGN OME", + "ate ch", + "D ifferent", + "ĠE LE", + "ĠGovern ments", + "ĠA head", + "ãħĭ ãħĭ", + "word press", + "L IB", + "ĠIn clude", + "ĠDor othy", + "0 45", + "ĠColomb ian", + "Ġle ased", + "88 4", + "Ġde grading", + "ĠDa isy", + "i ations", + "Ġbapt ized", + "Ġsurn ame", + "co x", + "Ġblink ed", + "ãĥ ¢", + "Ġpoll en", + "Ġder mat", + "Ġre gex", + "ĠNich olson", + "ĠE ater", + "ç ľ", + "rad or", + "Ġnarrow er", + "Ġhur ricanes", + "Ġhalluc inations", + "r idden", + "ISS ION", + "ĠFire fly", + "Ġattain ment", + "Ġnom inate", + "Ġav ocado", + "ĠM 
eredith", + "Ġt s", + "Ġreve rence", + "Ġe uph", + "Ġcr ates", + "ĠT EXT", + "Ġ4 43", + "Ġ3 19", + "J SON", + "iqu ette", + "Ġshort stop", + "ic key", + "Ġpro pelled", + "Ġap i", + "ĠTh ieves", + "77 9", + "Ġovers aw", + "Ġcol i", + "ĠNic ola", + "Ġover cl", + "ik awa", + "ĠC yr", + "Ġ38 4", + "78 9", + "ĠAll ows", + "10 27", + "Det roit", + "TR Y", + "set up", + "ĠSocial ism", + "Sov iet", + "s usp", + "ĠAP R", + "ĠShut down", + "Ġal uminium", + "zb ek", + "ĠL over", + "GGGG GGGG", + "Ġdemocr acies", + "Ġ19 08", + "ĠMer rill", + "ĠFranco is", + "gd ala", + "Ġtraff ickers", + "ĠT il", + "ĠGo at", + "Ġsp ed", + "ĠRes erv", + "Ġpro d", + "55 2", + "Ġc ac", + "ĠUn iv", + "ĠSch we", + "Ġsw irling", + "ĠWild erness", + "ĠEgg s", + "Ġsadd ened", + "Ġarch aic", + "H yd", + "Ġexcess ively", + "B RE", + "Ġaer ospace", + "ĠVo ices", + "Cra ig", + "Ġign ited", + "In itially", + "ĠMc A", + "Ġhand set", + "Ġreform ing", + "Ġfrust rations", + "ĠDead pool", + "ĠBel ichick", + "ract or", + "ĠRagnar ok", + "ĠD rupal", + "ĠApp roximately", + "19 20", + "ĠHub ble", + "arm or", + "ĠSar as", + "ĠJon as", + "Ġnostalg ic", + "Ġfeas ibility", + "Sah aran", + "Ġorb iting", + "Ġ9 70", + "R u", + "Ġsh in", + "ĠInvestig ators", + "Ġinconsist encies", + "ĠP AN", + "B G", + "Ġgraz ing", + "Ġdetect ors", + "ĠStart up", + "ĠFun ny", + "ĠNa omi", + "Consider ing", + "Ġh og", + "ut f", + "ce mic", + "Ġfort ified", + "ĠFun ctions", + "Ġcod ec", + "nut rition", + "H at", + "\" !", + "micro soft", + "55 8", + "ĠTh in", + "ĠA CE", + "Al ias", + "ĠO PS", + "p apers", + "P K", + "ãĢ İ", + "Ġimpro bable", + "N orthern", + "equ al", + "Ġlook out", + "Ġty res", + "ĠMod ified", + "ĠK op", + "Abs olutely", + "Ġbuild up", + "sil ver", + "Ġaud i", + "Ġgro tesque", + "ĠSab er", + "ĠPres byter", + "ON Y", + "Ġglac iers", + "ĠSho als", + "ĠK ass", + "ĠH RC", + "ĠNic ol", + "ĠL unch", + "ĠF oss", + "âĸ Ĵ", + "AD RA", + "ĠOne Plus", + "o ing", + "ground s", + "Ġincident al", + "Ġdatas ets", + "68 9", + "ĠClarks on", + "Ġassemb ling", + "ĠCorrect ions", + "Ġdrink ers", + "Ġqual ifiers", + "Ġle ash", + "Ġunf ounded", + "ĠH undred", + "Ġkick off", + "T i", + "Ġrecon cil", + "ĠGr ants", + "ĠCompl iance", + "ĠDexter ity", + "Ġ19 06", + "w arn", + "D allas", + "Max imum", + "n ard", + "av ia", + "be aut", + "ens itivity", + "tr ace", + "Ġpione ers", + "ĠF ract", + "ãĢ ı", + "Ġpre cept", + "Ġgloss y", + "ĠI EEE", + "Ac ross", + "Ġ6 80", + "S leep", + "che on", + "Ġsatir ical", + "ĠMin otaur", + "ĠCla ude", + "Ġr é", + "ape go", + "Ġcar rot", + "ĠSem in", + "ino a", + "Ġz o", + "Ind ependent", + "Ġdiagn oses", + "ĠC ue", + "M AR", + "Ġrend ition", + "ĠK ik", + "Ġpath ology", + "Ġselect s", + "Link edIn", + "Ġass ay", + "ĠD res", + "Ġtext ual", + "post ed", + "IT AL", + "ĠM aul", + "N eal", + "Ġinter connected", + "Ġerr atic", + "ĠVir us", + "Ġ5 30", + "Ġenvironmental ists", + "ĠP helps", + "Ġeng agements", + "ĠIN ST", + "Ġeconom ical", + "nox ious", + "Ġg earing", + "izz y", + "Ġfavor ably", + "ĠMcG ill", + "T erm", + "Ġh anged", + "Ġball park", + "ĠRe yes", + "Ġbe ware", + "ĠP sal", + "ĠMass acre", + "q i", + "Ġin accessible", + "acly sm", + "Ġfr ay", + "ill ac", + "Ġbitter ly", + "ĠCert ification", + "Mich igan", + "Ġir respective", + "al ore", + "Em pty", + "Ġendorse ments", + "Ġund et", + "f g", + "equ ipped", + "Ġmerc iless", + "ĠC ust", + "Ġimm ature", + "Ġvou cher", + "ĠBlack well", + "Ñ ı", + "h awk", + "dis ciplinary", + "ile e", + "ĠMak oto", + "ĠD ude", + "ãĥĩ ãĤ£", + "Y ears", + "Ġin ver", + "Ġsh aman", + "ĠY ong", + "ip el", + "ell 
en", + "ĠCath y", + "br ids", + "Ġs arc", + "65 1", + "N ear", + "Ġground work", + "Ġam az", + "Ġ4 15", + "ĠHunting ton", + "hew s", + "ĠB ung", + "Ġarbit rarily", + "ĠW it", + "ĠAl berto", + "Ġdis qualified", + "best os", + "46 1", + "Ġp c", + "Ġ28 4", + "ro bat", + "Rob in", + "Ġh ugs", + "ĠTrans ition", + "ĠOcc asionally", + "Ġ3 26", + "ĠWh ilst", + "ĠLe y", + "Ġspaces hip", + "cs v", + "Ġun successfully", + "ĠA u", + "le ck", + "ĠWing ed", + "ĠGrizz lies", + ". �", + "Ġne arer", + "ĠSorce ress", + "ĠInd igo", + "El se", + "8 40", + "let es", + "Co ach", + "Ġup bringing", + "ĠK es", + "Ġseparat ist", + "Ġrac ists", + "Ġch ained", + "Ġabst inence", + "lear ning", + "Ġrein stated", + "Ġsymm etry", + "Ġremind ers", + "ĠChe vy", + "Ġm ont", + "Ġexempl ary", + "ĠT OR", + "Z X", + "Ġqual itative", + "ĠSt amp", + "ĠSav annah", + "ĠRoss i", + "Ġp aed", + "Ġdispens aries", + "ĠWall s", + "ĠCh ronic", + "Ġcompliment ary", + "ĠBeir ut", + "Ġ+ ---", + "igs list", + "Ġcrypt ographic", + "mas ters", + "ĠCap itals", + "Ġmax imal", + "Ġent ropy", + "Point s", + "Ġcombat ants", + "l ip", + "ĠGl ob", + "ĠB MC", + "ph ase", + "th ank", + "HT TP", + "Ġcomm uter", + "Ġ\\( \\", + ".. /", + "ĠReg ener", + "ĠDO I", + "ĠActiv ision", + "Ġsl it", + "os al", + "RE M", + "Ġch ants", + "Y u", + "Ke ys", + "Bre xit", + "ĠFor ced", + "Ari zona", + "Ġsquad ron", + "IS O", + "ĠMal one", + "Ġ3 38", + "Ġcontrast ing", + "Ġt idal", + "Ġlib el", + "Ġimpl anted", + "Ġupro ar", + "ĠC ater", + "Ġpropos itions", + "M anchester", + "ĠEuro s", + "it amin", + "G il", + "ĠEl ven", + "ĠSe ek", + "ĠB ai", + "Ġredevelop ment", + "ĠTown s", + "ĠL ub", + "! \",", + "al on", + "K rist", + "Ġmeas urable", + "Ġimagin able", + "Ġapost les", + "Y N", + "7 60", + "Ġster oid", + "Ġspecific ity", + "ĠL ocated", + "ĠBeck er", + "ĠE du", + "ĠDiet ary", + "uts ch", + "ĠMar ilyn", + "Ġbl ister", + "ĠM EP", + "ĠK oz", + "ĠC MS", + "y ahoo", + "ĠCar ney", + "Ġbo asting", + "ĠC aleb", + "By te", + "read s", + "ad en", + "Pro blem", + "ĠWood ward", + "S we", + "S up", + "ĠK GB", + "Set up", + "Ġtac it", + "Ġret ribution", + "Ġd ues", + "ĠM ü", + ". 
?", + "ä¸ Ń", + "p ots", + "Ġcame o", + "ĠP AL", + "educ ation", + "A my", + "like ly", + "g ling", + "Ġconstitution ally", + "ĠHam m", + "ĠSpe ak", + "Ġwid gets", + "br ate", + "Ġcra ppy", + "ĠI ter", + "Ġanticip ating", + "ĠB out", + "P ixel", + "ĠY ep", + "ĠLaur ie", + "Ġh ut", + "Ġbullet in", + "ĠSal vation", + "Ġch ats", + "ear able", + "Honest ly", + "AL TH", + "onse qu", + "c ult", + "isco very", + "ovy ch", + "Ġse lves", + "ĠSat oshi", + "S ounds", + "Ġconver gence", + "ĠRosen berg", + "19 74", + "Ġnas al", + "Ġfull est", + "Ġfer ocious", + "x us", + "ist e", + "AM S", + "Ġlobb ied", + "Ġso othing", + "ĠGun n", + "t oday", + "0 24", + "Ġinspir ational", + "ĠN BN", + "p b", + "g ewater", + "or ah", + "all owed", + "ĠCol iseum", + "Ġspecial izing", + "Ġinsane ly", + "ĠT ape", + "del ay", + "Ġt arn", + "ĠP ound", + "Ġmel anch", + "Ġdeploy ments", + "il and", + "Ġless en", + "Ġfur ry", + "ĠUE FA", + "Ġblood shed", + "ĠMe ier", + "ither ing", + "Ġhe irs", + "ĠJ aw", + "ax ter", + "ĠPublic ations", + "Ġal ters", + "int ention", + "ĠWinc hester", + "d etermination", + "ĠLif etime", + "th in", + "Mon ster", + "7 80", + "Ġapprox imation", + "Ġsuper markets", + "ĠSecond s", + "or os", + "h uge", + "Ġb ribe", + "ĠLIM ITED", + "un ed", + "Ġmis interpret", + "ĠIn jury", + "Ġ3 67", + "Ġthreshold s", + "ĠCarn ival", + "Ġgastro intestinal", + "Ġguid eline", + "Ġde ceived", + "f eatures", + "Ġpurported ly", + "ĠRon nie", + "ĠNew t", + "Ġsp acious", + "as us", + "Ġsuperhero es", + "ĠCyn thia", + "le gged", + "k amp", + "ch io", + "Ġth umbnail", + "ĠShir ley", + "ill ation", + "Ġshe ds", + "ĠZ y", + "E PA", + "Ġdam s", + "Ġy awn", + "n ah", + "ĠPe ggy", + "ĠE rie", + "ĠJu ventus", + "ĠF ountain", + "r x", + "don ald", + "al bum", + "ĠComp rehensive", + "Ġc aching", + "ĠU z", + "ulner ability", + "ĠPrinc iple", + "ĠJ ian", + "ing ers", + "cast s", + "ĠOs iris", + "ch art", + "t ile", + "ĠTiff any", + "ĠPatt on", + "ĠWh ip", + "Ġovers ized", + "J e", + "ĠCind erella", + "ĠB orders", + "ĠDa esh", + "M ah", + "Ġdog ma", + "Ġcommun ists", + "v u", + "Coun cil", + "Ġfresh water", + "Ġw ounding", + "Ġdeb acle", + "Ġyoung ster", + "Ġthread ed", + "ĠB ots", + "ĠSav ings", + "ãģ Ĥ", + "ol ing", + "oh o", + "Ġillum ination", + "M RI", + "Ġlo osen", + "tr ump", + "ag ency", + "ur ion", + "Ġmoment arily", + "ĠCh un", + "ĠBud apest", + "ĠAl ley", + "D isk", + "Ġaston ished", + "ĠCon quer", + "ĠAccount ing", + "h aving", + "ĠWe in", + "ĠAl right", + "Ġrev olver", + "Ġdel usion", + "Ġrelic s", + "Ġad herent", + "qu ant", + "Ġhand made", + "or io", + "Ġcomb ating", + "c oded", + "Ġquad ru", + "re th", + "N ik", + "ĠTrib al", + "ĠMyster ious", + "Ġin hal", + "ĠWin ning", + "ĠClass ification", + "ch anged", + "Ġun ab", + "Ġsc orn", + "icip ated", + "w l", + "ond uctor", + "Ġrein forcing", + "ĠChild hood", + "an ova", + "Ġadventure r", + "Ġdoctor al", + "ĠStrateg ies", + "Ġengulf ed", + "ĠEnc ounter", + "Ġl ashes", + "Crit ical", + "ric ular", + "ĠU TF", + "oci ation", + "check ing", + "ĠConsult ing", + "Run time", + "per iod", + "ĠAs gard", + "Ġdist illed", + "ĠPas adena", + "ĠD ying", + "ĠCOUN TY", + "Ġgran ite", + "Ġsm ack", + "Ġparach ute", + "ĠS UR", + "Virgin ia", + "ĠF urious", + "78 7", + "ĠO kin", + "Ġcam el", + "ĠM bps", + "19 72", + "ĠCh ao", + "ĠC yan", + "j oice", + "ef er", + "ĠW rap", + "ĠDeb ate", + "S eg", + "Ġfore arm", + "ĠIgn ore", + "Ġtim estamp", + "Ġprob ing", + "ĠNo on", + "ĠGra il", + "f en", + "Ġdorm ant", + "ĠFirst ly", + "ĠE ighth", + "ĠH UN", + "ĠDes ire", + "or as", + "Girl s", + "ĠDes 
mond", + "z ar", + "am ines", + "O AD", + "exec ute", + "Ġbo obs", + "ĠAT L", + "_ (", + "Chel sea", + "Ġmasturb ation", + "ĠCo C", + "Ġdestroy er", + "ĠCh omsky", + "Ġsc atter", + "ĠAss ets", + "79 6", + "ĠC argo", + "Ġrecept ive", + "ĠSc ope", + "Ġmarket ers", + "Ġlaun chers", + "Ġax le", + "ĠSE A", + "se q", + "ĠM off", + "f inding", + "ĠGib bs", + "Georg ia", + "extreme ly", + "N J", + "Ġlab orers", + "st als", + "Ġmed iation", + "ĠH edge", + "at own", + "Ġi od", + "des pite", + "v ill", + "J ane", + "ex istence", + "Ġcoinc ided", + "ĠUt ilities", + "ĠChe ap", + "Ġlog istical", + "Ġcul mination", + "ĠNic otine", + "p ak", + "F older", + "Ġrod ents", + "st uff", + "Ġlaw fully", + "Ġreper to", + "io ch", + "j j", + "Dial ogue", + "HH HH", + "lic tion", + "Look s", + "Ġ29 7", + "Ġtur rets", + "ĠAb andon", + "Ġinc ess", + "ĠTraff ord", + "Ġcur led", + "Ġprefer ring", + "Ġprivat ization", + "Ġir resist", + "ĠP anda", + "ĠSh ake", + "ĠMc Gr", + "ãĥ Ħ", + "und ers", + "Ġdiscrim inated", + "Ġbart ender", + "I LE", + "Atl antic", + "Ġprop ensity", + "ĠW iz", + "ĠG im", + "con ference", + "Ġrein forces", + "G h", + "w agon", + "Ġe erie", + "F al", + "Ġhug ged", + "rac ist", + "R IC", + "F u", + "Ġf iller", + "ĠSt ub", + "Ġeng raved", + "ĠWrest le", + "Ġimagin ative", + "ĠPe er", + "ĠFact ors", + "an us", + "ĠDrac ula", + "mon itor", + "Ġrou ters", + "ib ia", + "ĠBoo lean", + "end ale", + "ĠSl aughter", + "ĠSh ack", + "R FC", + "ĠSpiel berg", + "S ax", + "ĠPH OTO", + "ĠCl over", + "ĠR ae", + "Dep ending", + "ĠMem or", + "ar am", + "Ġpier ced", + "Ġcur tains", + "v ale", + "ĠInqu isition", + "ĠP oke", + "Ġforecast ing", + "Ġcompl ains", + "S ense", + "ĠHer mes", + "isc overed", + "Ġb ible", + "ĠMor ph", + "Ġg erm", + "78 5", + "D ON", + "Ġcon gen", + "Ġcr ane", + "ĠD PR", + "Ġrespect fully", + "R oom", + "ĠN aw", + "ĠDal ai", + "re ason", + "ĠAng us", + "Educ ation", + "ĠTitan ic", + "Ë ľ", + "Ġo val", + "un ited", + "Ġthird s", + "Ġmoist ur", + "ĠC PC", + "M iami", + "Ġtent acles", + "ĠPol aris", + "ex c", + "ex clusive", + "ĠPra irie", + "Ġcol ossal", + "ĠBl end", + "sur prisingly", + "ÃŃ s", + "Ġindo ctr", + "Ġbas al", + "ĠMP EG", + "und o", + "Spl it", + "Develop ment", + "Ġlan tern", + "19 71", + "Ġprov ocation", + "Ġang uish", + "ĠB ind", + "ĠLe ia", + "duc ers", + "ipp y", + "conserv ancy", + "Ġinitial ize", + "ĠTw ice", + "ĠSu k", + "Ġpred ic", + "Ġdi ploma", + "Ġsoc iop", + "Ing redients", + "Ġhamm ered", + "ĠIr ma", + "Q aida", + "Ġglim ps", + "ĠB ian", + "Ġst acking", + "Ġf end", + "gov track", + "Ġun n", + "dem ocratic", + "ig ree", + "Ġ5 80", + "Ġ29 4", + "Ġstraw berry", + "ID ER", + "Ġcher ished", + "ĠH ots", + "Ġinfer red", + "Ġ8 08", + "ĠS ocrates", + "O regon", + "ĠR oses", + "ĠFO IA", + "Ġins ensitive", + "Ġ40 8", + "Recomm end", + "ĠSh ine", + "Ġpain staking", + "UG E", + "ĠHell er", + "ĠEnter prises", + "I OR", + "ad j", + "N RS", + "L G", + "Ġalien ated", + "Ġacknowled gement", + "ĠA UD", + "ĠRen eg", + "Ġvou chers", + "Ġ9 60", + "Ġm oot", + "ĠDim ensions", + "Ġc abbage", + "B right", + "g at", + "ĠK lu", + "Ġlat ent", + "Ġz e", + "ĠM eng", + "Ġdis perse", + "Ġpand emonium", + "H Q", + "Ġvirt uous", + "ĠLoc ations", + "ee per", + "prov ided", + "Ġse ams", + "ĠW T", + "iz o", + "PR OV", + "Ġtit anium", + "Ġrecol lection", + "Ġcr an", + "Ġ7 80", + "ĠN F", + "49 1", + "64 2", + "p acking", + "59 8", + "text ure", + "Sp ider", + "fre edom", + "cipl ed", + "ĠTAM ADRA", + "âĻ ¦", + "aut hent", + "ĠW ANT", + "r ified", + "Ġr ites", + "Ġuter us", + "k iss", + "Ġâī ¤", + "Ġsk illet", + 
"Ġdis enfranch", + "ĠGa al", + "Comp an", + "Ġage ing", + "gu ide", + "B alt", + "Ġiter ator", + "Ġdiscretion ary", + "t ips", + "Ġprim ates", + "ĠTechn ique", + "ĠPay ments", + "az el", + "ĠR OCK", + "stant ial", + "0 60", + "Ġd mg", + "ĠJack ets", + "ĠPlay off", + "Ġnurs ery", + "ĠSy mb", + "art on", + "Ġannex ation", + "Color ado", + "Ġco ils", + "ĠSh oes", + "âĦ¢ :", + "ĠRo z", + "COM PLE", + "ĠEve rest", + "ĠTri umph", + "J oy", + "G rid", + "à ¼", + "process or", + "ĠPros per", + "ĠSever us", + "ĠSelect ed", + "r g", + "ĠTay yip", + "St ra", + "Ġski ing", + "Ġ? )", + "Ġpe g", + "Tes la", + "Ġtime frame", + "Ġmaster mind", + "ĠN B", + "scient ific", + "ĠSh it", + "gener ic", + "IN TER", + "N UM", + "Ġst roll", + "ĠEn ix", + "ĠM MR", + "ĠE MS", + "m ovie", + "Ĥ ª", + "Ġminim izing", + "idd ling", + "Ġilleg itimate", + "Ġprot otyp", + "Ġpremature ly", + "Ġmanual s", + "obb ies", + "ĠCass idy", + "D EC", + "des ktop", + "Ġaer os", + "Ġscreen ings", + "Ġdeb ilitating", + "ĠGr ind", + "nature conservancy", + "Ġf ades", + "ter mination", + "assets adobe", + "F actor", + "Ġdefinitive ly", + "P oké", + "ap ult", + "ĠLaf ayette", + "C orn", + "ĠCor al", + "Ġstagn ant", + "T ue", + "Ġdissatisf action", + "G ender", + "Ġkid neys", + "ĠG ow", + "ĠDef eat", + "ĠAsh ton", + "Ġcart els", + "Ġfore closure", + "ĠExpl ore", + "stre ngth", + "ot in", + "Ġveterin arian", + "Ġf umble", + "Ġpar ap", + "ĠSt rait", + "r ils", + "Ġpr ick", + "ĠBerm uda", + "ĠAm munition", + "skin ned", + "Ġab ound", + "ĠB raz", + "Ġshar per", + "ĠAsc ension", + "Ġ9 78", + "Ġpreview s", + "Ġcommun ion", + "ĠX Y", + "Ġph ony", + "Ġnewcom er", + "Ġ3 32", + ".\" ,\"", + "Ġredist ribution", + "Prot ect", + "ĠSo f", + "K al", + "Ġlip stick", + "w orst", + "Ġtang led", + "Ġretrospect ive", + "int eger", + "Ġvolunte ering", + "Ġ19 07", + "Ġ --------------------", + "ic hen", + "Ġunve iling", + "Ġsen seless", + "Ġfisher ies", + "\\ -", + "Ġh inges", + "Ġcalcul us", + "My th", + "Ġund efeated", + "Ġoptim izations", + "Ġdep ress", + "Ġbill board", + "ĠY ad", + "ĠPy ramid", + "Is n", + "I de", + "Ġleg ion", + "ĠK ramer", + "ent anyl", + "Ġpenet rating", + "ĠHaw th", + "ĠPR ODUCT", + "ĠGer ard", + "ĠP act", + "ĠIn cluding", + "ĠEl ias", + "ĠEl aine", + "vis ual", + "Ġhum ming", + "Ġcond esc", + "ĠF asc", + "ä¸ Ĭ", + "Ġe galitarian", + "Ġdev s", + "ĠD ahl", + "O ps", + "D H", + "ĠB ounce", + "id ated", + "ald o", + "Ġrepublic an", + "Ġh amb", + "ĠS ett", + "ograph ies", + "CH APTER", + "Ġtrans sexual", + "Ġsky rocket", + "ans wer", + "Ġmark up", + "Ø ª", + "Ġhero ine", + "Comp are", + "ĠT av", + "Be ast", + "Ġsuccess ors", + "Ġna ïve", + "ĠBuck ley", + "st ress", + "me at", + "Ġdownload able", + "Ġindex ed", + "Ġsc aff", + "ĠL ump", + "ĠHom o", + "Stud io", + "In sp", + "Ġr acked", + "far ious", + "ĠPet ty", + "Ex ternal", + "Ġ19 09", + "W ars", + "com mit", + "put ers", + "Ġun ob", + "ĠEr r", + "ĠE G", + "ĠAl am", + "ĠSiber ia", + "ĠAtmosp heric", + "IS TER", + "ĠSatan ic", + "trans lation", + "ĠL oud", + "tra umatic", + "l ique", + "Ġreson ate", + "ĠWel ch", + "Ġspark ing", + "ĠT OM", + "t one", + "Ġout l", + "Ġhandc uffed", + "ĠSer ie", + "8 01", + "Ġland marks", + "ĠRee ves", + "Ġsoft ened", + "Ġdazz ling", + "ĠW anted", + "month s", + "Mag ikarp", + "Ġunt reated", + "ĠBed ford", + "M i", + "ĠDynam o", + "O re", + "79 5", + "Ġwrong ful", + "Ġl ured", + "Ġcort isol", + "Ġve x", + "d rawn", + "ile t", + "Download ha", + "ĠF action", + "Ġlab yrinth", + "Ġhij acked", + "w aters", + "er ick", + "Ġsuper iors", + "ĠRow ling", + "ĠGu inness", 
+ "Ġt d", + "99 2", + "Ġune arthed", + "Ġcentr if", + "Ġsham eless", + "P od", + "ĠF ib", + "Ġ icing", + "Ġpredict or", + "Ġ29 2", + "fore station", + "con struct", + "C and", + "@ #", + "Ġag itated", + "Ġre pr", + "OV A", + "Ġkn itting", + "ĠLim a", + "Ġf odder", + "68 4", + "ĠPerson a", + "k l", + "7 01", + "Ġbreak up", + "á ¸", + "Ġapp alled", + "Ġantidepress ants", + "ĠSus sex", + "Har ris", + "ĠTher mal", + "ee ee", + "U pload", + "Ġg ulf", + "Ġdoor step", + "ĠSh ank", + "L U", + "ĠM EN", + "ĠP ond", + "s orry", + "Ġmis fortune", + "n ance", + "Ġb ona", + "M ut", + "Ġde graded", + "ĠL OG", + "ĠN ess", + "an imal", + "Ġa version", + "und own", + "Ġsupplement ed", + "ĠC ups", + "Ġ50 4", + "Ġdep rive", + "ĠSpark le", + "Å Ĥ", + "ĠMed itation", + "auth ors", + "ĠSab an", + "ĠN aked", + "air d", + "ĠMand arin", + "ĠScript ures", + "ĠPerson nel", + "ĠMahar ashtra", + "Ġ19 03", + "ĠP ai", + "ĠMir age", + "omb at", + "Access ory", + "Ġfrag mented", + "T ogether", + "Ġbelie vable", + "ĠGl adiator", + "al igned", + "ĠSl ug", + "M AT", + "Ġconvert ible", + "ĠBour bon", + "amer on", + "ĠRe hab", + "nt ax", + "Ġpowd ered", + "pill ar", + "Ġsm oker", + "ĠMans on", + "ĠB F", + "5 11", + "ĠGood ell", + "ĠD AR", + "m ud", + "g art", + "Ġob edient", + "ĠTrans mission", + "ĠDon ation", + "8 80", + "Ġbother ing", + "Material s", + "ãĤ ±", + "dest roy", + "Ġfore going", + "Ġanarch ism", + "ĠK ry", + "ice ps", + "Ġl ittered", + "ĠSch iff", + "Ġanecd otal", + "un its", + "Ġf ian", + "ĠSt im", + "ĠS OME", + "ĠInv aders", + "Ġbehaviour al", + "ĠVent ures", + "Ġsub lime", + "Ġfru ition", + "ĠPen alty", + "Ġcorros ion", + "¶ ħ", + "Ġlik ened", + "Ġbesie ged", + "ween ey", + "ĠCre ep", + "Ġlinem en", + "mult i", + "ic ably", + "ud der", + "Ġvital ity", + "Ġshort fall", + "ĠP ants", + "ap ist", + "H idden", + "ĠDro ps", + "med ical", + "Ġpron unciation", + "ĠN RL", + "Ġinsight ful", + "J V", + "ĠBe ard", + "ĠCh ou", + "Ġchar ms", + "Ġb ins", + "Ġamb assadors", + "ĠS aturdays", + "Ġinhib itor", + "ĠFr anch", + "6 01", + "', '", + "ĠCon or", + "art ney", + "ĠX peria", + "g rave", + "be es", + "ĠProtest ants", + "Ġso aking", + "ĠM andal", + "Ġph ased", + "Ġ6 60", + "Ġsc ams", + "Ġbuzz ing", + "ĠItal ians", + "ĠLoren zo", + "ĠJ A", + "Ġhes itated", + "Ġcl iffs", + "ĠG OT", + "ingu ishable", + "Ġk o", + "Ġinter ruption", + "Z ip", + "Lear ning", + "Ġundersc ores", + "ĠBl ink", + "K u", + "57 9", + "ĠAut ob", + "I RE", + "Ġwater ing", + "Ġpast ry", + "8 20", + "Ġvision ary", + "ĠTempl ar", + "awa ited", + "Ġpist on", + "Ġant id", + "current ly", + "Ġp ard", + "Ġw aging", + "Ġnob ility", + "ĠY us", + "Ġinject ing", + "f aith", + "ĠP ASS", + "å º", + "Ġret ake", + "ĠPR OC", + "Ġcat hedral", + "b ash", + "Ġwrest lers", + "Ġpartner ing", + "Ġn oses", + "Ġ3 58", + "Trans form", + "am en", + "Ġb outs", + "ĠId eal", + "ĠConstant in", + "Ġse p", + "ĠMon arch", + "att en", + "ĠPe oples", + "mod ified", + "Ġmor atorium", + "Ġpen chant", + "Ġoffensive ly", + "Ġprox ies", + "ok ane", + "ĠTaiwan ese", + "ĠP oo", + "ĠH OME", + "us ional", + "Ġver bs", + "ĠO man", + "vis ory", + "Ġpersu asion", + "Ġmult it", + "Ġsc issors", + "G ay", + "ow ay", + "oph ysical", + "l us", + "gn u", + "Ġap ocalyptic", + "Ġabsurd ity", + "Ġplay book", + "Ġautobi ography", + "I UM", + "Ġsne aking", + "ĠSim ulation", + "pp s", + "ell ery", + "Plan et", + "Ġright fully", + "Ġn iece", + "ĠN EC", + "ĠIP O", + "ĠDis closure", + "lean or", + "ous y", + "ST ER", + "Ġ28 2", + "Cru z", + "Ch all", + "64 3", + "ĠSurv ive", + "ĠF atal", + "ĠAm id", + "ap o", + "We 
apons", + "D EN", + "7 70", + "ĠGreen wald", + "Ġlin en", + "al os", + "Ġpollut ants", + "ĠPCI e", + "k at", + "Ġp aw", + "ĠK raft", + "C hem", + "ĠTermin ator", + "Ġre incarn", + "Ġ] [", + "ĠSe eds", + "Ġsilhou ette", + "ĠSt ores", + "Ġgro oming", + "ĠD irection", + "ĠIs abel", + "ĠBr idges", + "ðŁ ij", + "E ED", + "ĠM orsi", + "Ġval ves", + "ĠRank ed", + "ĠPh arma", + "ĠOrgan izations", + "Ġpenet rated", + "ĠRod ham", + "ĠProt oss", + "Ġove rest", + "Ġex asper", + "ĠT J", + "Ġ 000000", + "Ġtrick le", + "Ġbour bon", + "WH O", + "Ġw retched", + "Ġmicrosc opic", + "Ġcheck list", + "Ġad orned", + "R oyal", + "Ad minist", + "ĠRet irement", + "ĠHig hest", + "We ather", + "ile ge", + "Ġincre ments", + "ĠC osponsors", + "Ġmas se", + "ĠS inn", + "r f", + "Ġh ordes", + "as sembly", + "75 4", + "ĠNat asha", + "ĠTY PE", + "ĠGEN ERAL", + "Ġarr anging", + "Ġ40 7", + "l ator", + "Ġg lean", + "Ġdisc redited", + "Ġclin icians", + "UN E", + "Ġachie ves", + "ĠEm erson", + "com plex", + "= [", + "Ġprincip ally", + "Ġfra il", + "p icked", + "Ġthan king", + "Ġre cl", + "ĠL AST", + "Ġsupp ressing", + "il ic", + "Ġantidepress ant", + "ĠLis bon", + "Ġth or", + "Ġsp a", + "Ġking doms", + "ĠPear ce", + "em o", + "Ġpl ung", + "Ġdiv est", + "Ġ ********************************", + "b is", + "osp els", + "ad r", + "Sp irit", + "hall a", + "P ink", + "end ez", + "Ġresurrect ed", + "esc ape", + "ĠRosen stein", + "Ġge ological", + "Ġnecess ities", + "Ġcarn iv", + "ĠE lys", + "ĠBar ney", + "Ġ29 6", + "dig y", + "ST ON", + "D OWN", + "Ġmil estones", + "Ġk er", + "Ġdismant ling", + "Ġre prim", + "Ġcross ings", + "19 45", + "Ġpatri archy", + "Ġblasp hemy", + "Ġ3 59", + "met ry", + "ĠOb esity", + "ĠDiff erences", + "bl ocking", + "ãĥķ ãĤ¡", + "ich ita", + "ĠSab ha", + "ph alt", + "ĠCol o", + "ual a", + "effic ients", + "ĠMed ina", + "con sole", + "55 7", + "ĠHann ibal", + "ĠHab it", + "ĠF ever", + "Ġthen ce", + "Ġsyn agogue", + "Ġessential s", + "Ġw ink", + "ĠTr ader", + "ID A", + "ĠSp oiler", + "ĠIceland ic", + "ĠHay ward", + "Ġpe ac", + "Ġmal ice", + "Ġflash back", + "Ġth w", + "Ġlay offs", + "L iquid", + "Ġtro oper", + "Ġh inge", + "ĠRead ers", + "Ph ill", + "ĠB auer", + "Cre ated", + "Ġaud its", + "ac compan", + "Ġunsus pecting", + "ier a", + "6666 6666", + "Ġbro ch", + "Ġapprehend ed", + "ĠM alk", + "cer ning", + "ĠCod ex", + "O VER", + "M arsh", + "ĠD eng", + "ĠExp ression", + "Ġdisrespect ful", + "Ġasc ending", + "t ests", + "ĠPlaint iff", + "ster y", + "ĠAl ibaba", + "din and", + "ĠDem psey", + "Applic ations", + "mor al", + "Ġthrough put", + "Ġquar rel", + "Ġm ills", + "Ġhe mor", + "ĠC ASE", + "terror ist", + "st im", + "ifest yle", + "ro zen", + "CE PT", + "Ar k", + "u ci", + "lect ic", + "Ġirrit ating", + "she ets", + "A y", + "Ġrede emed", + "Ġhorn y", + "ĠTe ach", + "ĠS ear", + "dem ocracy", + "4 65", + "ĠRest ore", + "Ġstand by", + "ĠP is", + "iff in", + "Ġsleep y", + "Ġextr ater", + "Ġcompl iments", + "Fram eworks", + "Ġinstall s", + "Ġb anging", + "sur face", + "found land", + "Ġmetaph ysical", + "Ġ28 3", + "oul s", + "dev ices", + "Ar gs", + "ĠSac rifice", + "ĠMcC orm", + "es on", + "Cons ervative", + "ĠM ikhail", + "see ing", + "is ively", + "ĠRo oms", + "ĠGener ic", + "Ġenthusi astically", + "Ġgri pped", + "Ġcomed ic", + "ĠElectric ity", + "Ġgu errilla", + "Ġdec oration", + "ĠPerspect ive", + "Ġconsult ations", + "Ġun amb", + "Ġplag iar", + "Ġmagic ian", + "Ġe rection", + "ĠTour ism", + "or ied", + "ro xy", + "11 00", + "T am", + "Ī è", + "Î ³", + "× ª", + "ĠPred ators", + "Nit rome", + "Ġtelesc opes", + 
"project s", + "Ġun protected", + "Ġst ocked", + "ĠEnt reprene", + "nex pected", + "Ġwast ewater", + "V ill", + "Ġint imately", + "Ġi Cloud", + "ĠConst able", + "Ġspo of", + "Ġne farious", + "Ġfin s", + "Ġcens or", + "ĠMod es", + "ĠEs per", + "ar bon", + "Ġinter sections", + "Ġlaud ed", + "Ġphys i", + "Ġgener ously", + "ĠThe Nitrome", + "ĠTheNitrome Fan", + "Ġar isen", + "ĠÙ Ī", + "Ġg lands", + "ĠPav ilion", + "ĠGu pta", + "Ġuniform ly", + "Ġr amps", + "ri et", + "ĠWH EN", + "ĠVan essa", + "Ġrout ed", + "Ġlim p", + "ĠC PI", + "p ter", + "int uitive", + "Ġv aping", + "Ġexperiment ed", + "ĠOlymp us", + "ĠAm on", + "Ġsight ing", + "Ġinfiltr ate", + "ĠGentle man", + "Ġsign ings", + "ĠMe ow", + "ĠNav igation", + "che cks", + "4 33", + "Ġel apsed", + "ĠBulg arian", + "esp ie", + "ĠS OM", + "d uring", + "Ġsp ills", + "anc a", + "ĠPly mouth", + "M AL", + "Ġdomest ically", + "ĠWater gate", + "ĠF AM", + "k illed", + "ed ited", + "ĠYour self", + "Ġsynchron ization", + "ĠPract ices", + "ST EP", + "Ġgen omes", + "ĠQ R", + "not ice", + "Ġloc ating", + "z in", + "Ġ3 29", + "al cohol", + "Ġk itten", + "V o", + "Ġr inse", + "Ġgrapp le", + "ĠSc rew", + "ĠD ul", + "A IR", + "Ġle asing", + "ĠCaf é", + "Ġro ses", + "ĠRes pect", + "Ġmis lead", + "Ġperfect ed", + "Ġnud ity", + "Ġnon partisan", + "ĠCons umption", + "Report ing", + "Ġnu ances", + "Ġdeduct ible", + "ĠSh ots", + "Ġ3 77", + "Ġæ ľ", + "ano oga", + "Ben ef", + "ĠB am", + "ĠS amp", + "if ix", + "Ġgal van", + "ĠMed als", + "rad ius", + "Ġno bles", + "Ġe aves", + "igr ate", + "K T", + "ĠHar bour", + "u ers", + "Ġrisk ed", + "re q", + "Ġneuro t", + "get table", + "ain a", + "Rom ney", + "Ġunder pin", + "Ġlo ft", + "ĠSub committee", + "ĠMong ol", + "b iz", + "Ġmanif ests", + "ass isted", + "ĠG aga", + "Ġsy nergy", + "Ġreligious ly", + "ĠPre f", + "ĠG erry", + "T AG", + "ĠCho i", + "4 66", + "beh ind", + "ĠO u", + "Gold Magikarp", + "Ġhemor rh", + "R iver", + "Ġtend on", + "Ġinj ure", + "ĠF iona", + "Ġp ag", + "Ġag itation", + "|| ||", + "ur an", + "ĠE SA", + "Ġest eem", + "Ġdod ging", + "Ġ4 12", + "r ss", + "Ġce ases", + "ex cluding", + "Ġint akes", + "Ġinsert s", + "Ġemb old", + "ĠO ral", + "up uncture", + "4 11", + "ĠUn ified", + "ĠDe le", + "Ġfurn ace", + "ĠCoy otes", + "ĠBr ach", + "L abor", + "Ġhand shake", + "Ġbru ises", + "Gr ade", + "éĹ ĺ", + "ĠGram my", + "ile en", + "St ates", + "ĠScandinav ian", + "ĠKard ash", + "8 66", + "Ġeffort lessly", + "ĠDI RECT", + "ĠTH EN", + "ĠMe i", + "ert ation", + "19 68", + "Ġgro in", + "w itch", + "Requ irements", + "98 5", + "Ġroof s", + "Ġest ates", + "ĠH F", + "Ġha ha", + "Ġdense ly", + "ĠO CT", + "Ġpl astics", + "Ġincident ally", + "ĠTr acks", + "ĠTax es", + "Ġch anted", + "Ġforce ful", + "ĠBie ber", + "ĠK ahn", + "K ent", + "ĠC ot", + "lic ts", + "F ed", + "Ġhide ous", + "ĠVer d", + "ĠSynd icate", + "ĠIl legal", + "J et", + "ĠD AV", + "re asonable", + "c rew", + "Ġfundamental ist", + "Ġtruth ful", + "ĠJ ing", + "Ġl il", + "Ġdown ed", + "Ġen chanted", + "ĠPolic ies", + "ĠMcM aster", + "ĠH are", + "ides how", + "Ġpar ams", + "en cers", + "gorith m", + "Ġallow ances", + "Ġturb ulent", + "Ġcomplex ities", + "ĠK T", + "Ġ3 37", + "ĠGen etic", + "F UN", + "D oug", + "t ick", + "Ġg igs", + "ument hal", + "Ġpatriarch al", + "Ġcal c", + ", ...", + "Ġc out", + "ĠGu an", + "Ġpath ological", + "ĠR ivals", + "Ġunder rated", + "Ġflu orescent", + "ĠJ iu", + "arna ev", + "ĠQu an", + "Ġ4 29", + "Ġ à¨", + "M ario", + "Con struct", + "ĠC itation", + "ĠR acial", + "ĠR SA", + "ĠF idel", + "Ġ3 95", + "Person ally", + "C ause", + "à 
»", + "rad ical", + "in en", + "Ġvehement ly", + "ĠPap a", + "Ġintern ship", + "Ġfl akes", + "ĠRe ck", + "Luck ily", + "B ra", + "20 20", + "rav ings", + "R N", + "W onder", + "Ser iously", + "Ġre usable", + "Ġpoll uted", + "ĠP eng", + "le igh", + "ind le", + "Ġcircuit ry", + "ĠMad onna", + "ĠB ART", + "Res idents", + "att ribute", + "Phil adelphia", + "Cl ub", + "Ġplan ner", + "Ġfr antically", + "Ġfaith fully", + "ĠTerrit ories", + "ĠL AT", + "ĠAnders en", + "an u", + "ĠP ARK", + "ĠS ora", + "i age", + "ĠPlay offs", + "ĠG CC", + "4 27", + "Ġab norm", + "ĠL ever", + "Ġdisob edience", + "As ync", + "ĠShe a", + "V ert", + "Ġsk irts", + "ĠSaw yer", + "x p", + "Ġwors ening", + "Ġsc apego", + "ĠAng le", + "oth al", + "Ġtro ve", + "ĠSt y", + "ĠN guyen", + "mar ine", + "ide on", + "Dep ths", + "Bl og", + "ĠIll uminati", + "Ġtract s", + "Ġorgan ise", + "Ġo str", + "F s", + "Ġlever aging", + "ĠD aredevil", + "as ar", + "Ġl ang", + "Ġex termin", + "urs ions", + "ĠRom o", + "ãĤ¤ ãĥĪ", + "Ġcont ended", + "Ġencounter ing", + "ĠTable t", + "ĠAltern ate", + "sk ill", + "Ġswe ets", + "Ġco hesive", + "cap acity", + "Ġrep ud", + "Ġl izard", + "ro o", + "Ġpilgr ims", + "ĠR uff", + "ĠInstr ument", + "ĠLog o", + "uit ous", + "E H", + "Ġsales man", + "Ġank les", + "L ed", + "ĠPat ty", + "ud os", + "Own er", + "Ġdiscrep ancies", + "k j", + "M U", + "Ġuncond itional", + "Dragon Magazine", + "i ard", + "O ak", + "ĠConvers ation", + "be er", + "ĠOs aka", + "D elta", + "us ky", + "Ġsecret ion", + "Ġpl aza", + "Ġm ing", + "Ġde pletion", + "ĠM ous", + "ĠI TS", + "ĠH imal", + "ĠFle ming", + "Ġcyt ok", + "ĠH ick", + "Ġbat ters", + "ĠInt ellectual", + "6 75", + "é r", + "IS ION", + "ĠQu entin", + "ĠCh apters", + "ih adi", + "Ġco aster", + "WAY S", + "ĠL izard", + "ĠY or", + "and ering", + "S kin", + "ha ust", + "ab by", + "Ġportray ing", + "Ġwield ed", + "d ash", + "Ġprop onent", + "Ġr ipple", + "Ġgrap hene", + "Ġfly er", + "Ġrec urrent", + "Ġdev ils", + "Ġwater fall", + "æĺ ¯", + "go o", + "Text Color", + "Ġtam pering", + "IV ES", + "TR UMP", + "ĠAb el", + "ĠS AL", + "ĠHend ricks", + "ĠLu cius", + "b ots", + "Ġ40 96", + "IST ORY", + "Gu est", + "ĠN X", + "in ant", + "Ben z", + "ĠLoad ed", + "ĠCle ver", + "t reatment", + "Ġta vern", + "Ġ3 39", + "ĠT NT", + "ific antly", + "Tem perature", + "F el", + "Ġunder world", + "ĠJud ges", + "Ġ< +", + "Ġst ump", + "Ġoccup ancy", + "Ġab er", + "ĠF inder", + ") \",", + "ĠN unes", + "res et", + "in et", + "ect omy", + "Ġwell ness", + "ĠP eb", + "quart ered", + "and an", + "Ġneg atives", + "ĠTh iel", + "ĠCl ip", + "ĠL TD", + "Ġbl ight", + "Ġreperto ire", + "K yle", + "Ġqu er", + "ĠC es", + "Ġha pl", + "98 9", + "ĠTh ames", + "isc opal", + "Des k", + "ivari ate", + "ĠEx cellence", + "found ation", + "Ġâ ĩ", + "X i", + "Ġmyster iously", + "esty les", + "Ġper ish", + "ĠEng els", + "ĠDE AD", + "09 0", + "}} }", + "ĠUn real", + "Ġrest less", + "ID ES", + "orth odox", + "ĠInter mediate", + "Ġdin ners", + "ĠTr out", + "ĠSe ym", + "ĠHall s", + "og ged", + "Ġtraged ies", + "Ġdid nt", + "67 6", + "Ġail ments", + "Ġobserv able", + "ĠV ide", + "ad apt", + "ĠD usk", + "Ġprofessional ism", + "ĠPres cott", + "ĠInd ies", + "p ox", + "ĠMe hran", + "W ide", + "Ġend emic", + "ĠPar an", + "B ird", + "Ġped als", + "ĠI U", + "ĠAdam ant", + "ĠH urt", + "Ġcorrel ates", + "urd en", + "Ġspons oring", + "cl imate", + "ĠUnivers ities", + "ĠK not", + "enn es", + "ĠDam ian", + "ĠAx el", + "S port", + "Ġbar b", + "ĠS no", + "sh own", + "ste en", + "ud ence", + "Ġnon violent", + "Ġhom ophobia", + "Ġbiom ass", + "ĠDet 
ail", + "Ġsrf N", + "ĠT une", + "accompan ied", + "I ENCE", + "Al bert", + "ĠMong o", + "z x", + "ĠCer berus", + "or bit", + "c ens", + "Ġsl ay", + "SH ARE", + "H Y", + "Ġb rawl", + "ĠPro be", + "Ġnonex istent", + "ĠClare nce", + "ĠBlack burn", + "Ġport als", + "ĠR ita", + "ĠRem ain", + "ĠLe vant", + "Ġtrick ed", + "ĠF erry", + "aver ing", + "ĠStraw berry", + "ĠAn swers", + "Ġhorrend ous", + "ĠA man", + "Supp lement", + "ĠT oad", + "Ġpe eled", + "Ġman oeuv", + "ĠU zbek", + "mond s", + "ĠH ector", + "Ġ40 2", + "pe es", + "fix es", + "Ġd j", + "Ġres umes", + "Ġaccount ant", + "Ġadvers ity", + "Ġham pered", + "ĠL arson", + "Ġd oping", + "part s", + "H ur", + "Ġbe arded", + "Ġy r", + "ĠPlug in", + "å¥ ³", + "Ġ/ **", + "rol ley", + "Ġwaters hed", + "ĠSub mission", + "if lower", + "AS C", + "Ġcho ir", + "Ġsculpt ures", + "m A", + "incre asing", + "ai i", + "Ġsne akers", + "Ġconfront s", + "ĠEle phant", + "ĠEl ixir", + "Ġrec al", + "ĠT TL", + "w idget", + "ĠW ax", + "ĠGr ayson", + "Ġha irst", + "Ġhumili ated", + "ĠWAR N", + "app iness", + "ĠT TC", + "F uel", + "Ġpol io", + "Ġcomplex es", + "Ġbab e", + "ĠX IV", + "P F", + "). [", + "P arts", + "Ġ4 35", + "M eg", + "ĠY ards", + "ĠAL P", + "Ġy ells", + "Ġprin ces", + "Ġbull ies", + "ĠCapital ism", + "ex empt", + "FA Q", + "ĠSp onge", + "ĠAl a", + "Ġpleas antly", + "Ġbu f", + "Ġden ote", + "Ġunp ublished", + "Ġkne eling", + "asc a", + "Ġl apse", + "al ien", + "99 4", + "Ġrefere es", + "ĠLaw yers", + "S anta", + "Ġpuzz ling", + "ĠProm etheus", + "ĠPh araoh", + "ĠDel ay", + "Ġfacilit ates", + "ĠC ES", + "Ġjew els", + "Ġbook let", + "ond ing", + "Ġpolar ization", + "ĠMor an", + "ĠSal ad", + "ĠS OS", + "ĠAdv ice", + "PH OTOS", + "IC AN", + "iat ures", + "ex press", + "ĠWonder land", + "ĠC ODE", + "ĠCL ASS", + "9 75", + "Ġg rep", + "ĠD iesel", + "ĠGl ac", + "! 
?\"", + "Ġr m", + "o ine", + "disc rimination", + "ĠN urse", + "m allow", + "Ġv ortex", + "ĠCons ortium", + "Ġlarge Download", + "stra ight", + "augh lin", + "G rad", + "Ġpublic ized", + "ĠW aves", + "ĠRed d", + "Ġfest ivities", + "ĠM ane", + "ar ov", + "Ġfleet ing", + "ĠDr unk", + "ug en", + "C ele", + "Ġchromos omes", + "ĠD OT", + "-+-+ -+-+", + "Ġbus iest", + "ĠBe aver", + "Sy rian", + "ĠK yr", + "k as", + "ĠCross Ref", + "19 50", + "76 01", + "Ġrepe aling", + "ĠWin ners", + "ĠMac ro", + "ĠD OD", + "bl ance", + "S ort", + "64 1", + "Ġmet re", + "ĠD irk", + "Ġgo ggles", + "Ġdraw backs", + "Ġcomplain ant", + "Ġauthor izing", + "Ġantit rust", + "oper ated", + "Ġm ah", + "Ġexagger ation", + "Am azing", + "ĠSer aph", + "Ġha ze", + "w ow", + "Ġextingu ished", + "Ġcan yon", + "ĠB osh", + "Ġv ents", + "Ġsc rape", + "Cor rect", + "4 26", + "Ġav g", + "Dem and", + "ĠâĪ ¼", + "Ġmicrobi ota", + "\"} ],\"", + "ĠSt ev", + "B io", + "ĠPlan es", + "Ġsuggest ive", + "Ġdec ipher", + "ĠRefuge e", + "ĠKe jriwal", + "ĠGreen peace", + "Ġdecl ass", + "ĠSound ers", + "Ġth o", + "Ġdec rypt", + "Ġbr ushing", + "ĠJane iro", + "ip op", + "S i", + "8 77", + "ĠGeoff rey", + "Ġc pu", + "ĠHaz el", + "Ġview points", + "Ġcris py", + "ĠNot ification", + "Ġsold er", + "ĠMod est", + "ĠHem isphere", + "Ġcass ette", + "in cludes", + "Ġident ifiers", + "ĠC ALL", + "in cent", + "T odd", + "ĠSwe ep", + "Ġ3 34", + "b oss", + "Ġsm ir", + "gin x", + "Ġtown ship", + "Ġg rieving", + "ĠMos que", + "Net flix", + "AS ED", + "ĠMillenn ials", + "oc om", + "19 67", + "Ġbold ly", + "s leep", + "Ġes che", + "arij uana", + "Ġsw irl", + "ĠPen al", + "Ġneglig ent", + "ĠStephen son", + "K ER", + "ĠZ oro", + "ris is", + "Ġlocal ization", + "ĠSeym our", + "ĠAng lic", + "red itation", + "prot ection", + "ĠPa ige", + "Ġo mit", + "ĠR ousse", + "ĠT ub", + "Ġinv itations", + "t ty", + "Ġm oss", + "ph ysical", + "C redits", + "Ġan archy", + "Ġchild care", + "Ġl ull", + "ĠM ek", + "ĠL anguages", + "lat est", + "ĠSan ford", + "Ġus ability", + "Ġdiff use", + "ĠD ATA", + "Ġsp rites", + "ĠVeget a", + "ĠProm otion", + "ãĥ¼ ãĤ¯", + "rict ing", + "z ee", + "Tur kish", + "ĠTD s", + "pro ven", + "57 1", + "Ġsmug glers", + "707 10", + "Ġreform ed", + "ĠLo is", + "Ġun fl", + "ĠWITH OUT", + "ĠReturn ing", + "ann ie", + "ĠTom as", + "Fr anc", + "ĠProf it", + "ĠSER V", + "ĠR umble", + "ik uman", + "es an", + "Ġt esters", + "Ġgad get", + "Ġbrace let", + "ĠF SA", + "comp onent", + "Ġparamed ics", + "Ġj an", + "ĠRem em", + "ĠSk inner", + "Ġl ov", + "ĠQu ake", + "rom a", + "Ġfl ask", + "Pr inc", + "Ġover power", + "Ġlod ging", + "ĠK KK", + "ret te", + "Ġabsor bs", + "w rote", + "Ġ ,\"", + "K ings", + "ĠH ail", + "ĠFall ing", + "xt ap", + "ĠHel ena", + "ire ns", + "L arry", + "Ġpamph let", + "ĠC PR", + "G ro", + "ĠHirosh ima", + "Ġhol istic", + "\". [", + "Ġdet achment", + "Ġas pire", + "Ġcompl icit", + "ĠGreen wood", + "Ġresp awn", + "ĠSt upid", + "ĠFin ished", + "f al", + "b ass", + "Ġab hor", + "Ġmock ery", + "ĠFe ast", + "VID EO", + "Ġcon sec", + "ĠHung ry", + "P ull", + "ĠH ust", + "it ance", + "? 
ãĢį", + ") --", + "ĠPar allel", + "con v", + "4 69", + "ha ar", + "w ant", + "P aper", + "m ins", + "ĠTor o", + "ĠTR UMP", + "ĠR ai", + "D W", + "ĠW icked", + "ĠL ep", + "Ġfun ky", + "Ġdetrim ent", + "ios is", + "ache v", + "Ġde grade", + "im ilation", + "Ġret ard", + "Ġfrag mentation", + "Ġcow boy", + "ĠY PG", + "ĠH AL", + "Parent s", + "ĠS ieg", + "ĠStra uss", + "ĠRub ber", + "× IJ", + "Fr ag", + "Ġp t", + "Ġoption ally", + "ĠZ IP", + "ĠTrans cript", + "ĠD well", + "88 2", + "M erc", + "ĠM OT", + "ãĥ¯ ãĥ³", + "Ġhun ts", + "Ġexec utes", + "In cludes", + "Ġacid ic", + "ĠRespons ibility", + "ĠD umb", + "we i", + "And erson", + "ĠJas per", + "ight on", + "abs olutely", + "Ad ult", + "Ġpl under", + "Mor ning", + "ĠT ours", + "ĠD ane", + "Î º", + "ĠT EST", + "ĠG ina", + "Ġcan ine", + "aw an", + "Ġsocial ists", + "ĠS oda", + "Ġimp etus", + "ĠSupplement ary", + "oli ath", + "ĠKinn ikuman", + "mitted ly", + "second s", + "Ġorganis ers", + "Ġdocument aries", + "Vari able", + "GRE EN", + "Ġres orts", + "Ġbr agging", + "Ġ3 68", + "Art ist", + "w k", + "bl ers", + "Un common", + "ĠRet rieved", + "Ġhect ares", + "Ġtox in", + "r ank", + "Ġfaith s", + "ĠG raphic", + "Ġve c", + "ĠL IA", + "Af rican", + "Ġard ent", + "end iary", + "L ake", + "ĠD OS", + "cient ious", + "ĠOk awaru", + "ĠAll y", + "ĠTim eline", + "D ash", + "ĠI c", + "contin ue", + "Ġt idy", + "Ġinstinct ively", + "ĠP ossibly", + "ĠOut door", + "ĠWould n", + "Ġl ich", + "ĠBr ay", + "ĠA X", + "Ġà ī", + "Ġ+ #", + "\\ '", + "Direct ory", + "ab iding", + "Ġf eral", + "ic ative", + "but t", + "Ġper verse", + "S alt", + "Ġwar ped", + "Ġnin eteen", + "Ġcabin ets", + "Ġsrf Attach", + "ĠSl oan", + "Ġpower ing", + "reg ation", + "F light", + "se vere", + "Ġst ren", + "Ġc og", + "ap ache", + "Ġâ Ŀ", + "Ġcaf eteria", + "p aces", + "ĠGrim oire", + "uton ium", + "Ġr aining", + "Ġcir cling", + "Ġlineback ers", + "c redit", + "Ġrep atri", + "ĠCam den", + "lic ense", + "Ġly ric", + "Ġdescript or", + "Ġval leys", + "Ġre q", + "Ġback stage", + "ĠPro hibition", + "ĠK et", + "Op ening", + "S ym", + "æĸ ¹", + "Ġserv ings", + "Ġoverse en", + "Ġaster oids", + "ĠMod s", + "ĠSpr inger", + "ĠCont ainer", + "è »", + "ĠM ens", + "Ġmult im", + "Ġfire fighter", + "pe c", + "Ġchlor ine", + "Ð ¼", + "end i", + "Ġsp aring", + "Ġpolyg amy", + "ĠR N", + "ĠP ell", + "Ġt igers", + "Ġflash y", + "ĠMad ame", + "S word", + "Ġpref rontal", + "Ġpre requisite", + "uc a", + "Ġw ifi", + "Ġmiscon ception", + "Ġharsh ly", + "ĠStream ing", + "ot om", + "ĠGiul iani", + "foot ed", + "Ġtub ing", + "ind ividual", + "z ek", + "n uclear", + "m ol", + "Ġright ful", + "49 3", + "Ġspecial ization", + "Ġpassion ately", + "ĠVel ocity", + "ĠAv ailability", + "T enn", + "Ġl atch", + "ĠSome body", + "Ġhel ium", + "cl aw", + "Ġdi pping", + "XX X", + "Ġinter personal", + "7 10", + "Ġsub ter", + "Ġbi ologists", + "ĠLight ing", + "Ġopt ic", + "Ġden im", + "end on", + "ĠC orm", + "Ġ3 41", + "ĠC oup", + "Ġfear less", + "Ġal ot", + "ĠCliff ord", + "ĠRun time", + "ĠProv ision", + "up dated", + "lene ck", + "Ġneur on", + "Ġgrad ing", + "ĠC t", + "sequ ence", + "in ia", + "con cept", + "Ġro aring", + "ri val", + "ĠCaucas ian", + "Ġmon og", + "key es", + "Ġappell ate", + "Ġlia ison", + "EStream Frame", + "ĠPl um", + "! 
.", + "Ġsp herical", + "Ġper ished", + "Ġbl ot", + "Ġben ches", + "Ġ4 11", + "Ġpione ered", + "Ġhur led", + "Jenn ifer", + "ĠYose mite", + "Ch air", + "Ġreef s", + "Ġelect or", + "ĠAnt hem", + "65 2", + "Ġun install", + "Ġimp ede", + "Ġbl inking", + "Ġgot o", + "Dec re", + "A ren", + "Ġstabil ization", + "ĠDis abled", + "ĠYanuk ovych", + "Ġoutlaw ed", + "ĠVent ura", + "ten ess", + "Ġplant ation", + "Ġy acht", + "ĠHu awei", + "Ġsol vent", + "Ġgr acious", + "Ġcur iously", + "Ġcapac itor", + "Ġc x", + "ĠRef lex", + "Ph ys", + "ĠC f", + "pt in", + "cons ervative", + "Ġinv ocation", + "c our", + "F N", + "ĠNew ly", + "H our", + "As ian", + "ĠLe ading", + "ĠAer ospace", + "An ne", + "Ġpre natal", + "Ġdeterior ating", + "H CR", + "ĠNorm andy", + "ol ini", + "ĠAm bro", + "9 10", + "Ġset backs", + "ĠT RE", + "Ġs ig", + "ĠSc ourge", + "59 7", + "79 8", + "Game play", + "Ġm sec", + "M X", + "Ġprice y", + "ĠL LP", + "aker u", + "Ġover arching", + "ĠB ale", + "Ġworld ly", + "Cl ark", + "Ġscen ic", + "Ġdisl iked", + "ĠCont rolled", + "T ickets", + "ĠE W", + "ab ies", + "ĠPl enty", + "Non etheless", + "Ġart isan", + "Trans fer", + "ĠF amous", + "Ġinf ield", + "ble y", + "Ġunres olved", + "ĠML A", + "ãĤ Ĥ", + "Cor rection", + "Ġdemocr at", + "ĠMore no", + "ro cal", + "il ings", + "Ġsail or", + "Ġr ife", + "h ung", + "Ġtrop es", + "Ġsn atched", + "ĠL IN", + "ĠB ib", + "ES A", + "ĠPre v", + "ĠCam el", + "run time", + "Ġob noxious", + "4 37", + "Ġsum mers", + "Ġunexpl ained", + "ĠWal ters", + "cal iber", + "Ġg ull", + "ĠEnd urance", + "ä½ ľ", + "Ġ3 47", + "Ir ish", + "Ġaer obic", + "Ġcr amped", + "ĠHon olulu", + "à ©", + "us erc", + "ec ast", + "AC Y", + "ĠQu ery", + "ãĤ¹ ãĥĪ", + "Bet a", + "Ġsuscept ibility", + "ĠSh iv", + "ĠLim baugh", + "Ġà ĸ", + "ĠN XT", + "ĠM uss", + "ĠBrit ons", + "ES CO", + "EG IN", + "Ġ% %", + "Ġsec ession", + "ĠPat ron", + "ĠLu a", + "n aires", + "ĠJPM organ", + "us b", + "ocy te", + "Ġcouncill ors", + "ĠLi ang", + "f arm", + "Ġnerv ously", + "Ġattract iveness", + "ĠK ov", + "j ump", + "Pl ot", + "Ġst ains", + "ĠStat ue", + "ĠApost les", + "he ter", + "ĠSUP PORT", + "Ġoverwhel m", + "Y ES", + "Ġ29 1", + "d ensity", + "Ġtra pping", + "M it", + "Ġf ide", + "ĠPam ela", + "atl antic", + "Dam n", + "Ġp ts", + "OP A", + "Ġserv icing", + "Ġoverfl owing", + "ul o", + "ĠE rit", + "t icket", + "light ing", + "ĠH mm", + "ãĥ¼ ãĥ«", + "im oto", + "Ġchuck le", + "4 23", + "ãģ ķ", + "sh ape", + "Ġque ues", + "Ġanch ors", + "ãĤ¼ ãĤ¦ãĤ¹", + "F er", + "Ġaw oke", + "Ġ6 66", + "h ands", + "Ġdiver gence", + "Ġ50 5", + "T ips", + "Ġdep ot", + "Ġske w", + "ĠDel iver", + "op ot", + "Ġdiv ul", + "ĠE B", + "uns igned", + "ĠUn i", + "X box", + "Ġfor ks", + "Ġ7 02", + "å ¯", + "Ġpromot ers", + "ĠV apor", + "Ġlev ied", + "sl ot", + "Ġpig ment", + "Ġcyl inders", + "C RE", + "Ġsn atch", + "Ġperpet ually", + "Ġl icking", + "ĠFe et", + "ĠKra ken", + "ĠHold en", + "ĠCLS ID", + "m r", + "Ġproject or", + "Ġden otes", + "Ġchap el", + "ĠTor rent", + "b ler", + "R oute", + "ĠDef endant", + "ĠPublisher s", + "ĠM ales", + "ĠInn ov", + "ĠAg ility", + "rit er", + "ty mology", + "st ores", + "L ind", + "Ġf olly", + "ĠZur ich", + "B le", + "Ġnurt ure", + "Ġcoast line", + "uch in", + "D omin", + "Ġfri vol", + "ĠCons olid", + "res ults", + "M J", + "Ġphyl ogen", + "Ġha uled", + "ĠW iley", + "ĠJess ie", + "ĠPrep are", + "ĠE ps", + "Ġtreasure r", + "I AS", + "Ġcolon ists", + "Ġin und", + "ĠWW F", + "ĠCon verted", + "6 000", + "out side", + "ĠApp earance", + "ĠRel ic", + "ĠM ister", + "s aw", + "Ġresult ant", + "Ġadject ive", + 
"ĠLaure l", + "ĠHind i", + "b da", + "Pe ace", + "Ġreb irth", + "Ġmembr anes", + "Ġforward ing", + "Ġcoll ided", + "ĠCar olyn", + "K ansas", + "5 99", + "ĠSolid GoldMagikarp", + "Be ck", + "Ġstress ing", + "ĠGo o", + "ĠCooper ative", + "Ġf s", + "ĠAr chie", + "L iter", + "ĠK lopp", + "J erry", + "Ġfoot wear", + "War ren", + "Ġsc ree", + "h are", + "Under standing", + "P ed", + "Ġanth ology", + "ĠAnn ounce", + "M ega", + "Ġflu ent", + "Ġbond age", + "ĠDisc ount", + "il ial", + "C art", + "ĠNight mares", + "Sh am", + "ĠB oll", + "uss ie", + "H ttp", + "Atl anta", + "Ġun recogn", + "ĠB id", + "Ġunder grad", + "Ġforg iving", + "ĠGl over", + "AAAA AAAA", + "4 45", + "V G", + "pa io", + "kill ers", + "Ġrespons ibly", + "Ġmobil ize", + "Ġeffect ed", + "ĠL umin", + "Ġk ale", + "Ġinfring ing", + "ann ounced", + "Ġf itt", + "b atch", + "ĠT ackle", + "ĠL ime", + "ĠAP P", + "uke mia", + "Ġrub y", + "Ġex oner", + "ĠCas ual", + "0 70", + "Ġpel vic", + "Ġautom ate", + "ĠK ear", + "ĠCoast al", + "Ġcre ed", + "Ġbored om", + "ĠSt un", + "ri ott", + "Ĥ İ", + "Ġregener ate", + "Ġcomed ians", + "ĠOP ER", + "Sp ons", + "id ium", + "on is", + "L ocated", + "05 7", + "Ġsusp ense", + "ĠD ating", + "C ass", + "Ġneoc ons", + "ĠShin zo", + "Ġaw oken", + "ch rist", + "ĠMess ages", + "att led", + "ĠSpr ay", + "ĠSp ice", + "C W", + "Ġshield ing", + "ĠG aul", + "Am id", + "Ġparam ilitary", + "Ġmult if", + "ĠTan ner", + "il k", + "Ġgodd amn", + "g ements", + "Ġbe friend", + "m obi", + "Ġ3 88", + "fold er", + "acc a", + "Ġins in", + "g ap", + "N ev", + "fif th", + "Ġpsychiat ry", + "b anks", + "TH IS", + "Ġhar b", + "ac qu", + "Ġfac ade", + "ĠPower Point", + "80 3", + "Ġbl uff", + "Sh ares", + "Ġfavor ing", + "El izabeth", + "Ãį Ãį", + "Ġr anger", + "77 2", + "ĠAr che", + "h ak", + "ĠGen etics", + "ĠF EMA", + "Ġev olves", + "Ġest e", + "ĠP ets", + "ĠM é", + "ĠInterest ing", + "ĠCanter bury", + "ch apter", + "ĠStar fleet", + "Sp anish", + "Ġdraw back", + "ĠNor wich", + "9 70", + "n orth", + "ag anda", + "Ġtransform ative", + "ram ids", + "bi ology", + "ad ay", + "Ġpropag ation", + "ĠGam ma", + "ĠDen ise", + "ĠCalcul ator", + "ent imes", + "ĠB ett", + "Ġapp endix", + "ĠHD D", + "AK ING", + "Ġst igmat", + "Ġhol ster", + "Ġord inarily", + "Ch ance", + "ĠCont rary", + "Ġad hesive", + "Ġgather s", + "6 12", + "re au", + "ony ms", + "ew ays", + "Ġindu ces", + "Ġinterchange able", + "se m", + "Wh it", + "Ġtr ance", + "Ġincorpor ation", + "ĠExt ras", + "Fin ancial", + "Ġawkward ly", + "ĠStur geon", + "ĠH Y", + "Norm ally", + "ĠEnd ing", + "ĠAss ist", + "enc rypted", + "Ġsub jug", + "Ġn os", + "Ġfan atic", + "C ub", + "C U", + "?\" .", + "Ġirre versible", + "å Ĥ", + "03 1", + "ĠH AR", + "sp read", + "ul ia", + "= $", + "Sc ope", + "L ots", + "Ġlif estyles", + "ol on", + "Ġf eds", + "Ġcongrat ulate", + "web kit", + "Ġindist inguishable", + "ĠSw ing", + "Ġcommand ments", + "qu ila", + "ab ella", + "m ethyl", + "ann abin", + "Ġo vere", + "Ġlob ster", + "ĠQU EST", + "ĠCONT IN", + "bern atorial", + ":::: ::::", + "ĠTra ve", + "ĠSam oa", + "AN I", + "75 2", + "Ð ´", + "userc ontent", + "ĠMod erate", + "y eah", + "ĠK itt", + "Ġwe e", + "Ġstuff ing", + "ĠInter vention", + "ĠD ign", + "Ġware houses", + "ĠF iji", + "Ġpel lets", + "Ġtake away", + "ĠT ABLE", + "ĠClass ical", + "col lection", + "Ġland fall", + "ĠMus cle", + "Ġsett les", + "ĠAD V", + "Ġ3 44", + "L aura", + "Ġf ared", + "ĠPart ial", + "4 36", + "oss ibility", + "ĠD aly", + "ĠT arant", + "ĠFu ji", + "am l", + "c ence", + "55 1", + "ĠProced ures", + "ĠO CD", + "ĠU D", + "t in", + "Q 
UI", + "ach o", + "4 38", + "Ġgl itches", + "Ġenchant ment", + "Ġcalcul ates", + "IR O", + "ĠH ua", + "alys es", + "ĠL ift", + "um o", + "Ġle apt", + "Ġhypothes ized", + "ĠGust av", + "it ans", + "VERS ION", + "æ ł", + "Rog er", + "Ġr and", + "ĠAd apter", + "Ġ3 31", + "ĠPet ition", + "k ies", + "M ars", + "Ġunder cut", + "ze es", + "ĠLy ons", + "ĠDH CP", + "Miss ing", + "Ġretire es", + "Ġins idious", + "el i", + "> )", + ". ãĢį", + "Ġfinal ists", + "ĠA ure", + "Ġacc user", + "Ġwas tes", + "ĠY s", + "ĠL ori", + "Ġconstitu encies", + "Ġsupp er", + "Ġmay hem", + "or ange", + "Ġmis placed", + "Ġmanager ial", + "Ġex ce", + "ĠCL I", + "Ġprim al", + "ĠL ent", + "Cry stal", + "h over", + "ĠN TS", + "end um", + "Ġd w", + "ĠAl c", + "n ostic", + "Ġpres erves", + "ĠTs arnaev", + "Ġtri pled", + "rel ative", + "Arc ade", + "k illing", + "ĠW EEK", + "ĠH anna", + "D ust", + "Com pleted", + "ģ «", + "Ġappro ves", + "ĠSur f", + "ĠLuther an", + "ven ants", + "Ġrobber ies", + "we ights", + "soft ware", + "at ana", + "ug al", + "Ġgrav y", + "ĠC ance", + "OLOG Y", + "ly ak", + "Ton ight", + "Ġunve il", + "Ġ19 04", + "ĠMin ion", + "ent ious", + "st ice", + "pack ages", + "ĠG EAR", + "Ġg ol", + "ĠHutch inson", + "ĠProf ession", + "ĠG UN", + "ĠDiff erence", + "ĠTsuk uyomi", + "ĠLes bian", + "6 70", + "Ġfug itive", + "ĠPlan etary", + "-------------------------------- ------------------------", + "Ġacc rued", + "Ġch icks", + "Ġsto pp", + "Ġblock ers", + "C od", + "Ġcomment ers", + "ĠSomew here", + "ĠPhot ographer", + "the me", + "Ġmay oral", + "w u", + "Ġanten nas", + "Ġrev amped", + "ĠSubject s", + "it é", + "im ura", + "Ġentr ances", + "liter ally", + "Ġten ets", + "ĠO MG", + "ĠMP H", + "ĠDon key", + "ĠOff ense", + "Ġ\" +", + "Sn ap", + "ĠAF B", + "Ġan imate", + "ĠS od", + "His panic", + "Ġinconsist ency", + "D b", + "F Y", + "Ex port", + "Ġa pe", + "Ġpear l", + "ib el", + "ĠPAC s", + "Ġ{ \\", + "Ġact u", + "ĠHS BC", + "camp us", + "Ġpay off", + "Ġde ities", + "ĠN ato", + "ou ple", + "Ġcens ored", + "ĠCl ojure", + "Ġconf ounding", + "en i", + "Ġreck on", + "op he", + "Ġspot ting", + "Ġsign ifies", + "Ġprop el", + "Ġfest ive", + "S uggest", + "Ġpled ging", + "ĠB erman", + "Ġrebell ious", + "Ġovershadow ed", + "Ġinfiltr ated", + "j obs", + "67 2", + "Ġscal able", + "Ġdomin ion", + "ĠNew foundland", + "ĠMead ow", + "Ġpart itions", + "AM I", + "Ġsupplement ary", + "str ument", + "Ġhair y", + "Ġperpet uate", + "Ġnuts hell", + "ĠPot ato", + "ĠHob bit", + "Ġcur ses", + "Flo at", + "Ġquiet er", + "Ġfuel ing", + "Ġcaps ules", + "ĠL ust", + "ĠH aunted", + "Exec utive", + "Ġchild birth", + "G re", + "Ġrad iant", + "å İ", + "Ġm alls", + "Ġin ept", + "ĠWarrant y", + "Ġspect ator", + "E h", + "t hens", + "Ġculmin ating", + "æ ©", + "ary a", + "ãĤ ®", + "ilit arian", + "ĠOR IG", + "ĠSp ending", + "pt ives", + "ĠS iren", + "ĠRec ording", + "ay ne", + "Ġv im", + "Ġspr ang", + "T ang", + "ĠM FT", + "mor ning", + "ĠWe ed", + "m peg", + "cess ion", + "ĠCh ung", + "7 30", + "w arning", + "56 2", + "handed ly", + "P oor", + "P olitics", + ": #", + "Ġp ian", + "Ġfec es", + "ĠDocument ation", + "Ġban ished", + "Ġ3 99", + "ĠAR C", + "Ġhe inous", + "J ake", + "ĠAm ir", + "way ne", + "v re", + "os henko", + "Ġnotebook s", + "Ġfound ational", + "Ġmarvel ous", + "ixt ape", + "Ġwithdraw als", + "Ġh orde", + "ĠD habi", + "is able", + "ĠK D", + "Ġcontag ious", + "ĠD ip", + "ĠAr rows", + "Ġpronoun s", + "Ġmorph ine", + "ĠB US", + "68 2", + "Ġk osher", + "fin ished", + "ĠInstr uments", + "Ġf used", + "yd en", + "ĠSal mon", + "F ab", + "aff 
ected", + "K EN", + "C ENT", + "Dom ain", + "Ġpoke mon", + "ĠDr inking", + "G rowing", + "ĠInvestig ative", + "ĠA ether", + "em i", + "Ġtabl oid", + "Ġrep ro", + "ĠNot withstanding", + "ĠBers erker", + "Ġdram as", + "Ġclich é", + "Ġb ung", + "ĠU RI", + "ĠD os", + "0 44", + "Ġpast ors", + "Ġl s", + "Ġac rylic", + "aun ts", + "Ed ward", + "Ġmajor ities", + "B ang", + "Ġfield ing", + "ĠRepl acement", + "ĠAl chemy", + "pp ard", + "ĠRome o", + "ĠSan ct", + "ĠLav rov", + "ib ble", + "Inst ruct", + "Ġimp ractical", + "ĠPlay boy", + "ce phal", + "Ġsw aps", + "Ġk an", + "ĠThe o", + "Ġillust rating", + "Ġdismant led", + "ĠTrans gender", + "ĠG uth", + "UG H", + "Ġtriumph ant", + "Ġencomp ass", + "Ġbook mark", + "udd in", + "j er", + "Ġpred icate", + "ES H", + "Ġwhen ce", + "ĠAB E", + "Ġnon profits", + "Se qu", + "Ġdi abetic", + "Ġp end", + "Ġheart felt", + "sh i", + "Ġinter acts", + "ĠTele com", + "Ġbombard ment", + "dep ending", + "ĠLow ry", + "ĠAd mission", + "ĠBl ooming", + "ust ration", + "ene gger", + "B rew", + "Ġmol ten", + "ĠNer d", + "P IN", + "âĸ Ģ", + "ave ment", + "Ġtou red", + "Ġco efficients", + "ĠTray von", + "ans son", + "Ġsand y", + "t old", + "fl ows", + "Ġpop ulous", + "ĠT inder", + "ĠBl iss", + "R achel", + "Min imum", + "Ġcontest ant", + "ĠRed uce", + "ĠMor se", + "ĠGrass ley", + "ĠClick er", + "Ġexp r", + "Ġs incerity", + "Ġmar qu", + "Ġelic it", + "ĠPro position", + "ĠDemon ic", + "Ġtac os", + "G reek", + "Ġpost war", + "Ġin sofar", + "ĠP ork", + "Ġ35 2", + "doctor al", + "walk ing", + "Ġmid term", + "ĠSam my", + "sight ed", + "ĠTR ANS", + "ic i", + "AL D", + "ĠUS L", + "ĠF ISA", + "ĠAm pl", + "ĠAlex andra", + "ine lli", + "Tr ain", + "Ġsign ify", + "ĠVers us", + "Ġob fusc", + "Ġk h", + "Ġagg ro", + "ĠRen ault", + "Ġ3 48", + "5 18", + "ox icity", + "0 22", + "ĠTw ist", + "Ġgoof y", + "D ynamic", + "Ġbrief ings", + "m ight", + "8 99", + "Ġderog atory", + "T ro", + "Ġfor ging", + "ĠKor an", + "ĠMar ried", + "ĠBuc s", + "Ġpal ate", + "ĠCon version", + "m able", + "4 13", + "Ġ( _", + "Ġs iph", + "ĠN EO", + "col lege", + "Ġmarg inally", + "Ġfl irt", + "ĠTra ps", + "ĠP ace", + "é »Ĵ", + "Ġgoalt ender", + "Ġforb ids", + "Ġcler ks", + "ĠT ant", + "ĠRobb ins", + "ĠPrint ing", + "Ġpremie red", + "Ġmagn ification", + "ĠT G", + "ĠR ouse", + "ĠM ock", + "odynam ics", + "Ġpre clude", + "ism o", + "ĠPul itzer", + "Ġaval anche", + "ĠK odi", + "rib une", + "ĠL ena", + "Elect ric", + "Ġref inery", + "Ġend owed", + "Ġcounsel ors", + "Ġd olphin", + "ĠM ith", + "Ġarm oured", + "hib ited", + "Beg in", + "ĠP W", + "O il", + "ĠV or", + "ĠShar if", + "ĠFraz ier", + "est ate", + "Ġj ams", + "Pro xy", + "Ġband its", + "ĠPresbyter ian", + "ĠPrem iere", + "t iny", + "ĠCru el", + "Test ing", + "Ġhom er", + "ĠV ERS", + "ĠPro l", + "ĠDep osit", + "ĠCoff in", + "Ġsemin ars", + "Ġs ql", + "ĠDef endants", + "Altern atively", + "ĠR ats", + "ç «", + "ethy st", + "' >", + "Ġiss uer", + "58 9", + "Ġch aired", + "ĠAccess ories", + "man ent", + "Ġmar row", + "ĠPrim ordial", + "C N", + "Ġlimit less", + "ĠCarn age", + "Ġund rafted", + "q v", + "IN ESS", + "on ew", + "Ġco hesion", + "98 7", + "Ġne cks", + "Ġfootball er", + "ĠG ER", + "Ġdetect able", + "ĠSupport ing", + "ĠCS V", + "oc ally", + "k Hz", + "Ġund e", + "Ġsh one", + "Ġbud ding", + "tra k", + "Stand ing", + "ĠStar craft", + "ĠKem p", + "Ben ch", + "Ġthw arted", + "ĠGround s", + "ath i", + "L isa", + "Dial og", + "ĠS X", + "V ision", + "Ġingen ious", + "Ù IJ", + "Ġfost ering", + "ĠZ a", + "ĠIn gram", + "Ġ\" @", + "N aturally", + "6 16", + "0 35", + "ĠF AC", + "H 
mm", + "55 4", + "Ġacceler ator", + "ĠV end", + "Ġsun screen", + "Ġtuber culosis", + "rav iolet", + "ĠFunction al", + "ĠEr rors", + "ed ar", + "19 66", + "ĠSpect re", + "ĠRec ipes", + "88 5", + "ĠM ankind", + "L iverpool", + "Ġ| --", + "Ġsubst itutes", + "ĠX T", + "w ired", + "Ġinc o", + "ĠAf gh", + "E va", + "ic c", + "S ong", + "K night", + "Ġdilig ently", + "ĠBroad cast", + "A id", + "Ġaf ar", + "ĠH MS", + "aton in", + "ĠGr ateful", + "Ġfire place", + "ĠOm ni", + "e uro", + "ĠF RE", + "ĠSh ib", + "ĠDig est", + "t oggle", + "Ġheads ets", + "Ġdiff usion", + "ĠSqu irrel", + "ĠF N", + "Ġdark ened", + "out her", + "Ġsleep s", + "ĠX er", + "gun s", + "Ġset ups", + "Ġpars ed", + "Ġmamm oth", + "ĠCur ious", + "g ob", + "ĠFitz patrick", + "ĠEm il", + "im ov", + "........ .....", + "ĠB enny", + "Second ly", + "Ġheart y", + "Ġcons on", + "st ained", + "Ġgal actic", + "cl ave", + "Ġplummet ed", + "Ġp ests", + "Ġsw at", + "Ġrefer rals", + "ĠLion el", + "h oly", + "Ġunder dog", + "ĠSl ater", + "ĠProv ide", + "ĠAm ar", + "ress or", + "å Į", + "ong a", + "Ġtim id", + "Ġp iety", + "ĠD ek", + "Ġsur ging", + "az o", + "Ġ6 10", + "Ġdes ks", + "ĠSp okane", + "ĠAn field", + "Ġwars hips", + "ĠCob ra", + "Ġar ming", + "clus ively", + "ĠBad ge", + "ag ascar", + "ĠPR ESS", + "ĠMcK enzie", + "ĠFer dinand", + "burn ing", + "Af ee", + "Ġtyr ann", + "ĠI w", + "ĠBo one", + "100 7", + "ĠRe pt", + "Ċ Âł", + "Ġcar avan", + "ĠD ill", + "ĠBundes liga", + "Ch uck", + "Ġheal er", + "ãĥ¼ãĥ Ĩ", + "ĠH obby", + "Ġneg ate", + "Ġcrit iques", + "section al", + "mop olitan", + "Ġd x", + "Ġouts ourcing", + "ĠC ipher", + "t ap", + "Sh arp", + "Ġup beat", + "Ġhang ar", + "Ġcru ising", + "ĠNi agara", + "Ġ3 42", + "ill us", + "ĠS v", + "Ġsubt itles", + "Ġsqu ared", + "Ġbook store", + "Ġrevolution aries", + "ĠCarl ton", + "ab al", + "Ut ah", + "Ġdesp ise", + "ĠU M", + "cons ider", + "aid o", + "Ġc arts", + "ĠT urtles", + "Tr aining", + "Ġhonor ary", + " ¢", + "Ġtri angles", + "4 22", + "Ġreprint ed", + "Ġgrace ful", + "ĠMong olia", + "Ġdisrupt ions", + "ĠB oh", + "Ġ3 49", + "Ġdr ains", + "Ġcons ulate", + "Ġb ends", + "Ġm afia", + "ur on", + "ĠF ulton", + "m isc", + "Ġren al", + "Ġin action", + "ck ing", + "Ġphot ons", + "Ġbru ised", + "ĠC odes", + "og i", + "Ġn ests", + "ĠLove ly", + "ĠLib re", + "ĠD aryl", + "Ġ# ##", + "S ys", + ". 
,\"", + "Ġfree zes", + "est ablishment", + "and owski", + "Ġcum bers", + "ĠSt arg", + "ĠBom bs", + "Ġleg ions", + "Ġhand writing", + "Ġgr un", + "ĠC ah", + "sequ ent", + "Ġm oth", + "ĠMS M", + "Ins ert", + "F if", + "Ġmot el", + "Ġdex ter", + "ĠB ild", + "hearted ly", + "Ġpro pe", + "ĠText ure", + "ĠJ unction", + "ynt hesis", + "oc ard", + "ĠVer a", + "ĠBar th", + "Ġμ g", + "Ġl ashed", + "Ġ35 1", + "ĠZ amb", + "ĠSt aples", + "ĠCort ex", + "ĠCork er", + "Ġcontinu um", + "ĠWR ITE", + "unt a", + "rid or", + "Ġde ems", + "0 33", + "ĠG OLD", + "p as", + "Ġrep ressive", + "ãĥĨ ãĤ£", + "Ġbaff led", + "Sc ar", + "Ġc rave", + "Ġ ______", + "Ġentrepreneurs hip", + "ĠDirector ate", + "Ġ' [", + "Ġv ines", + "Ġasc ended", + "ĠGR OUP", + "ĠGood bye", + "Ġdo gged", + "ãĥ´ ãĤ¡", + "Man ufact", + "Ġunimagin able", + "ri ots", + "ier rez", + "Ġrel ativity", + "ĠCraft ing", + "ra ught", + "ud en", + "c ookie", + "Ġassass ins", + "Ġdissatisf ied", + "ac ci", + "Ġcondu it", + "Sp read", + "ĠR ican", + "n ice", + "izz le", + "Ġsc ares", + "ĠWH Y", + "ph ans", + "5 35", + "Ġprot racted", + "ĠKrist en", + "5 36", + "ĠSc rib", + "ĠNe h", + "Ġtwent ies", + "Ġpredic ament", + "Ġhandc uffs", + "Ġfruit ful", + "ĠU L", + "ĠLud wig", + "Ġatt est", + "ĠBre aker", + "Ġbi ologically", + "ĠDeal er", + "Ġrenov ations", + "f w", + "ess en", + "Al ice", + "ĠHen ri", + "Ġun ilaterally", + "ĠS idd", + "h ai", + "ĠSt retch", + "S ales", + "Ġcumbers ome", + "ĠJ avier", + "Ġtrend y", + "Ġrot ting", + "ĠChall enges", + "Ġscra ps", + "Ġfac ets", + "ĠVer onica", + "ĠVer ge", + "ĠS ana", + "Al ien", + "ĠR ih", + "Ġrad ial", + "ect ar", + "Ġ6 30", + "cl i", + "Mar ie", + "Ġwild fire", + "ĠCat o", + "h ander", + "Ġwait ress", + "Ġch ops", + "ĠS ECTION", + "Ġblunt ly", + "ĠCat alog", + "n ian", + "stud y", + "Ġpat rolling", + "ĠT enth", + "nex us", + "ĠN ON", + "op sy", + "Ġsc athing", + "s ie", + "Ġdeterior ated", + "V B", + "Naz is", + "Ġdep ictions", + "Ġauthent icated", + "ĠCon ce", + "k rit", + "Ġpromul g", + "ĠL ONG", + "U FC", + "ĠVis itors", + "ĠRec all", + "Ġrehab ilit", + "ĠSL I", + "Ġglac ier", + "ĠB ite", + "Ġ50 3", + "Ġvom it", + "Ġfer mented", + "ĠKh alid", + "Ġgrad ed", + "ĠMag icka", + "ĠIch igo", + "power ful", + "ic ators", + "75 3", + "Ġsh rew", + "Ġ35 6", + "Ġlegal izing", + "Ġall otted", + "ĠArch demon", + "ith ing", + "igg urat", + "V OL", + "Le od", + "Ġo ily", + "Ġindu cing", + "Ġamy gdala", + "Ġadm ins", + "ĠAcqu isition", + "C AN", + "Ġsche matic", + "Ġmo an", + "ĠCamer oon", + "Ġt ink", + "Ġmer ry", + "Ġbutter flies", + "ĠGo ff", + "Ġworks pace", + "ĠCor ona", + "Ġj avascript", + "ĠD olphin", + "ĠCant or", + "4 64", + "to e", + "AP S", + "ĠAg ing", + "Ġpadd ed", + "ĠZ heng", + "ĠHe ld", + "Ġest ranged", + "Ġ7 70", + ". 
}", + "ĠDun ham", + "Ġsm okes", + "Ġcap itals", + "und ai", + "Sh in", + "ĠFound ing", + "Ġent itle", + "Ġcenter piece", + "D iscover", + "Ġthere to", + "al ert", + "ĠN ou", + "ĠAnaly st", + "l c", + "F H", + "FI ELD", + "ĠP OV", + "gr ay", + "Ġar cs", + "ĠH OT", + "Ġr s", + "Ġoblig atory", + "ĠArchitect s", + "ĠS ven", + "ĠF EC", + "0 200", + "Christ mas", + "ĠAlban ia", + "rat om", + "58 7", + "Ġhard ships", + "Ġaut os", + "ĠCharg es", + "Ġap es", + "Ġ3 76", + "wal let", + "Ġintox ication", + "Ġgobl in", + "Ġ5 70", + "++++++++ ++++++++", + "ĠYel p", + "ĠMag netic", + "ĠBr iggs", + "R ail", + "Ġspawn s", + "ĠW iggins", + "Ġshowc ased", + "Ġres orted", + "ub en", + "Ġwh ipping", + "Ġim itate", + "Ġdigest ion", + "ĠUS PS", + "ĠG est", + "Ġye a", + "ĠT ight", + "ind al", + "ic as", + "` .", + "C AST", + "'' ;", + "ĠF et", + "opath ic", + "In valid", + "Ġregrett ed", + "Ġbro ccoli", + "ĠSc ores", + "e ve", + "Ġpost ings", + "Ġaccum ulating", + "Ġneed less", + "elf th", + "Ġmay ors", + "Ġsc rib", + "Ġanecd otes", + "Ġbot ched", + "ĠRib bon", + "ĠConstant ine", + "i uses", + "ess es", + "Ġdev ise", + "Comp ared", + "Ġp udding", + "Ġg arg", + "Ġev oke", + "79 7", + "Ġdet ox", + "9 09", + "ĠPie ces", + "ĠMcC artney", + "Ġmet ast", + "ĠK rypt", + "P OR", + "Ġt ending", + "ĠMerch ants", + "Pro of", + "ĠV arg", + "ĠPort able", + "ãĥ¼ãĥĨ ãĤ£", + "B rain", + "25 00", + "Ġfol iage", + "Ø ¹", + "Ġment ors", + "ĠA ires", + "Ġminimal ist", + "Ġing ested", + "ĠTro jan", + "ĠQ ian", + "inv olved", + "0 27", + "Ġer oded", + "RA FT", + "Ġbl urry", + "M ob", + "Ġbuff et", + "ĠFn atic", + "ae a", + "KN OWN", + "ĠIn it", + "s afety", + "en um", + "ACT ION", + "ĠCrus her", + "ĠD ates", + "Ġ ................", + "c alling", + "ak ov", + "Ġvent ured", + "Ġ5 55", + "au ga", + "H art", + "ĠA ero", + "M AC", + "Ġthin ly", + "Ġar ra", + "ST ATE", + "ild e", + "ĠJac qu", + "ĠFem ales", + "Ġthe orem", + "Ġ3 46", + "Ġsmart est", + "ĠPU BLIC", + "ĠK ron", + "ĠB its", + "ĠV essel", + "ĠTele phone", + "Ġdec ap", + "Ġadj unct", + "ĠS EN", + "mer ga", + "Ġred acted", + "Ġpre historic", + "Ġexplan atory", + "ĠRun s", + "ĠUtt ar", + "ĠM anny", + "ĠAUTH OR", + "ĠUnle ashed", + "ĠBow ling", + "be ans", + "79 3", + "Ġunivers es", + "Ġsens it", + "ĠK ung", + "re peat", + "ctr l", + "Ġp aced", + "Ġfull er", + "Cl ock", + "Ġrec omb", + "ĠF aul", + "ĠB unker", + "Ġpool ed", + "Ġan a", + "ĠM outh", + "LL OW", + "hum ane", + "Ġbull do", + "ĠMicha els", + "f am", + "Ġwreck ed", + "Ġport rays", + "ĠWh ale", + "ĠH es", + "Ġguess es", + "ĠBrow se", + "ĠL APD", + "Ġconsequ ential", + "ĠInn ocent", + "ĠD RAG", + "Ġtrans gress", + "ĠO aks", + "Ġtri via", + "ĠRes on", + "ĠA DS", + "-- +", + "ĠT oll", + "Ġgrasp ing", + "ĠTHE M", + "ĠT ags", + "ĠCon clusion", + "Ġpract icable", + "Ġho op", + "Ġunintention ally", + "Ġign ite", + "ĠM ov", + "ur ized", + "le hem", + "Ter min", + "Ġcolour ful", + "ĠLin ear", + "ĠEll ie", + "G y", + "Ġman power", + "Ġj s", + "Ġem oji", + "ĠSHAR ES", + "_ .", + "0000 7", + "Ġsophistic ation", + "Ġunders core", + "Ġpract ise", + "Ġbl ob", + "op ens", + "Uk raine", + "Ke eping", + "Y C", + "J R", + "ult imate", + "Cl aim", + "Ġautom obiles", + "99 3", + "ste el", + "Ġpart ing", + "ĠL ank", + "... 
?", + "Ġ38 5", + "Ġremem brance", + "Ġe ased", + "Ġcov ari", + "ĠS ind", + "Effect ive", + "Ġdisse mination", + "ĠMo ose", + "ĠCl apper", + "br ates", + "App ly", + "Ġinv is", + "Ġwors ened", + "âĢĶ -", + "Ġlegisl ator", + "ĠL ol", + "ĠRow e", + "Ġdealers hip", + "um ar", + "id ences", + "Ġinvestig ates", + "Ġc ascade", + "Ġbid der", + "ĠB EN", + "Iron ically", + "Ġpres iding", + "Ġd ing", + "Ġcontrad icted", + "Ġshut s", + "ĠF IX", + "Ġ3 66", + "Dist rict", + "Ġsin ful", + "ĠChar isma", + "o ops", + "Ġtot ality", + "Ġrest itution", + "ĠOpt imus", + "ĠD ah", + "Ġcl ueless", + "urn ed", + "Ġnut rit", + "Ġland owners", + "Ġfl ushed", + "Ġbroad en", + "m ie", + "Ġprint ln", + "Ġn ig", + "ĠCorp us", + "J en", + "Ġprot o", + "ĠWik imedia", + "ĠPal o", + "C OR", + "Ġstory lines", + "Ġevangel icals", + "ĠDar rell", + "Ġrot or", + "ĠH W", + "sk illed", + "ery l", + "Ġbe gg", + "ĠBl umenthal", + "Ġwe aving", + "Ġdown wards", + "ĠJack et", + "ĠANG EL", + "Te chnology", + "Ġes oteric", + "alde hyde", + "Ġfur iously", + "Ġforeign er", + "We ak", + "CH O", + "ĠH ound", + "Exper ience", + "ĠPlay station", + "ĠM IA", + "ĠU ng", + "cl oth", + "ag all", + "Ġcal ming", + "iz ens", + "St ruct", + "ĠW itches", + "ĠCeleb ration", + "Ġ........ ......", + "pt roller", + "ĠTC U", + "Ġb unny", + "ãĥ į", + "ut orial", + "Ġup scale", + "ĠSt a", + "ĠCol ossus", + "Ġchlor ide", + "ĠZ ac", + "ĠRe asons", + "ĠBrook ings", + "ĠWH ITE", + "][ /", + "ĠL ose", + "9 05", + "Ġunders ide", + "ern els", + "Ġv ape", + "do zen", + "upp et", + "ĠST OP", + "mat ical", + "ĠStat ements", + "hed dar", + "P AC", + "Custom er", + "Ġmem os", + "ĠP J", + "end ars", + "ĠLim its", + "l augh", + "Ġstabil ized", + "ĠALE C", + "Y A", + "Up grade", + "al am", + "Ġtechn o", + "Ġan ew", + "fore seen", + "Ġcolleg iate", + "ĠPy ro", + "ĠD ism", + "Ġfront line", + "Ġammon ia", + "I U", + "Qu ite", + "John ny", + "ass in", + "G OP", + "ĠSt yles", + "ĠSovere ign", + "acter ial", + "5 49", + "ĠR IP", + "ĠL ists", + "Ġ3 64", + "ĠRece p", + "s ocket", + "ĠByr d", + "ĠCand le", + "An cient", + "Ġappell ant", + "en forcement", + "ace a", + "ans ki", + "Ġold s", + "88 6", + "Ġsl urs", + "Ġem pires", + "Ġbuck le", + "Ġalien ation", + "ĠAber deen", + "Ġunic orn", + "Ġoverr iding", + "ĠL X", + "pp a", + "Ġdesp ised", + "ĠB ugs", + "ĠB ST", + "S outhern", + "5 33", + "Ġhall mark", + "ĠPost er", + "Ġstem med", + "Ġprincip als", + "ĠT ECH", + "ĠSand wich", + "It aly", + "Ġche esy", + "ĠSet TextColor", + "ĠProt ective", + "ĠC ohn", + "J O", + "apt op", + "Re ason", + "Lead er", + "ĠUnder stand", + "ĠFr idays", + "ĠContin uous", + "Ġcl ipping", + "ĠR ye", + "Ġber th", + "tim er", + "ann is", + "re act", + "Ġbuff alo", + "ĠPar as", + "Ġ6 55", + "Ġpres ided", + "ĠSun rise", + "Ġve ts", + "Ġcl oves", + "ĠMcC ull", + "Stre ngth", + "G AN", + "Ġill iter", + "ĠPric ing", + "l é", + "Ġresist or", + "Ġbr un", + "ĠSuff olk", + "Ñ ĭ", + "ĠL iver", + "Re leased", + "Ġwhat s", + "8 60", + "ĠMe asures", + "Ġden ouncing", + "ĠRy zen", + "Ġsou ven", + "Ġcareg ivers", + "ch ini", + "ĠScar lett", + "Ġt rough", + "Cong ratulations", + "Ġtax is", + "ĠTrad ition", + "j it", + "Ġtable top", + "Ġhither to", + "Ġdis information", + "off ensive", + "h ra", + "ĠDISTR ICT", + "Ġcompl icate", + "chen ko", + "ĠRecon struction", + "Ġpalp able", + "Ġa usp", + "Ġ4 28", + "Ġshowc ases", + "ĠPublic ation", + "know ledge", + "inn on", + "4 19", + "Ġretri eval", + "and ers", + "Ġref ute", + "Ġinqu ired", + "g ur", + "Ġneg ativity", + "Ġcons erve", + "Ġafter life", + "Ġpres upp", + "ĠGill espie", + 
"Ġm t", + "ĠD N", + "T ap", + "Ġper pend", + "ĠS my", + "does n", + "Ġsp illing", + "Ġhyp ers", + "K ate", + "® ,", + "ke pt", + "ĠP owered", + "Ġj a", + "ĠK lux", + "ard e", + "ab an", + "Ġ4 44", + "Ġflatt ened", + "ĠImprove ments", + "urg a", + "ĠK und", + "Ġins cribed", + "Ġfac ult", + "Ġunpre pared", + "ĠCons umers", + "Ġsatisf ies", + "Ġpul monary", + "Ġinf iltration", + "Ġex ternally", + "Ġcongrat ulations", + "ag han", + "Ġair liner", + "Ġfl ung", + "Ġfly ers", + "G D", + "Ġsnipp ets", + "Ġrec ursive", + "Ġmaster ing", + "L ex", + "Ġovert ly", + "v g", + "Ġluck ily", + "Ġenc ro", + "ĠLanc et", + "ĠAbyss al", + "function al", + "Ġs ow", + "Ġsqu id", + "Ġnar ration", + "Ġn aughty", + "ĠHon our", + "ĠSpart ans", + "Ġsh atter", + "ĠTac oma", + "ĠCal ories", + "ĠR aces", + "Sub mit", + "Ġpurpose fully", + "w av", + "ĠY ok", + "F est", + "ĠG err", + "Met ro", + "Ġit iner", + "f amous", + "Ġ\" {", + "in line", + "was her", + "Iss ue", + "ĠCL IENT", + "oz o", + "Vers ions", + "7 25", + "ĠGl ock", + "Ġshield ed", + "ĠPC R", + "ENC Y", + "ĠWe ld", + "ĠSim pl", + "Ġredirect ed", + "ĠK ham", + "Ġ( >", + "Ġlab ou", + "Ġdi apers", + "ss l", + "Ġcell ar", + "organ isms", + "ore sc", + "ĠBer ks", + "did n", + "Sh ipping", + "C hest", + "Ġund one", + "Ġmillion aire", + "Ġc ords", + "ĠYoung er", + "appropri ately", + "Ġsequ els", + "u ve", + "ant icipated", + "Ġle wd", + "ĠSh irt", + "ĠDmit ry", + "V eter", + "Ġsl aying", + "ĠY ar", + "Ġcompl ication", + "I owa", + "ĠEric a", + "ĠBL M", + "g irlfriend", + "b odied", + "6 26", + "19 63", + "Ġintermedi ary", + "Ġcons olation", + "M ask", + "ĠSi em", + "ow an", + "Beg inning", + "Ġfix me", + "Ġculmin ated", + "Ġcon duc", + "ĠVolunte er", + "Ġpos itional", + "Ġgre ets", + "ĠDefin itions", + "Ġthink er", + "Ġingen uity", + "Ġfresh men", + "ĠMom ents", + "Ġ35 7", + "ate urs", + "ĠFed Ex", + "s g", + "69 4", + "Ġdwind ling", + "ĠBO X", + "sel age", + "Ġt mp", + "Ġst en", + "ĠS ut", + "Ġneighbourhood s", + "Ġclass mate", + "f ledged", + "Ġleft ists", + "Ġclim ates", + "ATH ER", + "ĠScy the", + "ul iffe", + "Ġs ag", + "Ġho pped", + "ĠF t", + "ĠE ck", + "ĠC K", + "ĠDo omsday", + "k ids", + "Ġgas ped", + "Ġmon iker", + "ĠL od", + "ĠC FL", + "t ions", + "r ums", + "fol ios", + "Ġm d", + "Ġunc anny", + "Ġtrans ports", + "ĠLab rador", + "Ġrail ways", + "Ġappl iance", + "ĠCTR L", + "æ Ģ", + "Pop ulation", + "ĠConfeder acy", + "Ġunb earable", + "Ġdors al", + "ĠIn form", + "op ted", + "ĠK ILL", + "Mar x", + "Ġhypoc ritical", + "q us", + "ĠN umerous", + "ĠGeorg ian", + "ĠAmbro se", + "ĠL och", + "Ġgu bernatorial", + "ĠX eon", + "ĠSupp orts", + "ens er", + "ee ly", + "ĠAven ger", + "19 65", + "Ar my", + "Ġju xtap", + "Ġcho pping", + "ĠSpl ash", + "ĠS ustainable", + "ĠFin ch", + "Ġ18 61", + "ict ive", + "at meal", + "ĠG ohan", + "Ġlights aber", + "ĠG PA", + "ug u", + "ĠRE PL", + "vari able", + "Ġher pes", + "Ġdesert s", + "ac iously", + "Ġsitu ational", + "week ly", + "ob l", + "Ġtext ile", + "ĠCorn wall", + "Ġcontrace ptives", + "ĠA ke", + "] -", + "ä¹ ĭ", + ": ,", + "ĠW em", + "ĠB ihar", + "Ġ' .", + "Ġbe re", + "Ġanal ogue", + "ĠCook ies", + "Ġtake off", + "Whe el", + "Ġmaj estic", + "Ġcomm uting", + "0 23", + "ĠCor pse", + "ass ment", + "min i", + "Ġgor illa", + "ĠAl as", + "ere e", + "Ġacquaint ances", + "ĠAd vantage", + "Ġspirit ually", + "Ġey ed", + "pm wiki", + "ĠE nder", + "Ġtrans lucent", + "Ġnight time", + "ĠIM AGES", + "5 45", + "ĠK amp", + "ĠFre ak", + "Ġ ig", + "Port land", + "4 32", + "ĠM ata", + "Ġmar ines", + "Ġh ors", + "ater asu", + "ĠAtt ribution", + 
"Ġ-------- -", + "Ġk ins", + "ĠBEL OW", + "++ +", + "Ġre eling", + "ol ed", + "Ġcl utter", + "ĠRel ative", + "Ġ4 27", + "B US", + "Ġa vert", + "ĠChe ong", + "ĠA ble", + "ĠPry or", + "Develop er", + "Ġen cyclopedia", + "ĠUSA F", + "ĠG arry", + "Sp ain", + "Bl ocks", + "Ġexp osition", + "ĠGamer Gate", + "W OR", + "Ġstockp ile", + "Ġclot hed", + "ĠT one", + "ĠR ue", + "t umblr", + "Ġtreacher ous", + "Ġf rying", + "Ñ Į", + "ĠS ph", + "Ġrest raints", + "Ġemb odies", + "ĠG es", + "S afety", + "Ġnegoti ators", + "min ing", + "ĠAppalach ian", + "L OS", + "ĠJenn a", + "Ġpass ers", + "ç ĭ", + "sn ap", + "Ġshort en", + "creat or", + "Ġinn umerable", + "uther land", + "67 4", + "ĠW OM", + "ĠAs cend", + "ĠArm ory", + "ĠTrans action", + "K ick", + "Ġsuit case", + "day Name", + "Ġwaste ful", + "mar riage", + "ĠMcC abe", + "ite ch", + "ĠO ss", + "Cl osure", + "ĠTreasure r", + "Ġindec ent", + "ĠD ull", + "Ġresid ences", + "19 59", + "ĠS ettlement", + "Ham ilton", + "Ġself ies", + "ĠRank ing", + "ĠBark ley", + "ĠB ore", + "ĠW CS", + "ĠMar itime", + "ĠH uh", + "ĠForest ry", + "Ġcultiv ating", + "ĠBall ard", + "Ġg arrison", + "ĠSD L", + "9 30", + "Ġnas cent", + "Ġirresist ible", + "Ġaw fully", + "\\/ \\/", + "Ġequ ate", + "Ġanthrop ology", + "ĠSylv ia", + "Ġintest ine", + "Ġinnoc uous", + "cess ive", + "ag ra", + "ĠMet roid", + "G rant", + "8 55", + "ģ ĸ", + "Ġ\" _", + "ãĥĥ ãĥī", + "Ġappra isal", + "ĠFred dy", + "04 6", + "Ġ40 6", + "Ġ18 30", + "Ġd ocking", + "St atic", + "Ġp ont", + "ĠVolt age", + "ĠSt ead", + "ĠMort gage", + "ĠJon ah", + "Y L", + "CLASS IFIED", + "Ġas bestos", + "nik ov", + "Ġcoll agen", + "ĠOrb ital", + "P ocket", + "7 99", + "Ġhy brids", + "inc hes", + "Ġinv oice", + "und y", + "Ġinequ alities", + "T rend", + "w ashed", + "B ALL", + "Ġluc id", + "ĠComment ary", + "Ġw itty", + "Br andon", + "Ġbru ising", + "Ġ6 20", + "es cent", + "box ing", + "P OL", + "Ġ3 78", + "R ect", + "Ġlic ences", + "ĠMcG ee", + "p ressed", + "D anny", + "Ġj ammed", + "ord inate", + "Ġle th", + "Ġdistingu ishes", + "ĠYam aha", + "IL S", + "ĠH ume", + "ĠC ategories", + "Rober ts", + "Ch art", + "Ġbeet le", + "ĠGra veyard", + "Ġ($ )", + "o ÄŁ", + "Ġtw ilight", + "are lla", + "á ½", + "Ġbooth s", + "ĠH HS", + "ĠFeld man", + "Ġexcav ation", + "Ġphilosoph ies", + "at ography", + "ĠGar age", + "te chnology", + "Ġunfor gettable", + "Ġver ifying", + "Ġsubord inates", + "E ls", + "Ġne b", + "G aming", + "EN A", + "ĠAchieve ment", + "it ters", + "ĠG abe", + "Ġd umps", + "for cer", + "Ġpo ignant", + "ĠM BA", + "ĠHe idi", + "ime i", + "Ġm ages", + "Ġliber ate", + "Ġcircum cised", + "ĠMer maid", + "ĠMat th", + "t ogether", + "ĠW ichita", + "Ġstore front", + "ĠAd in", + "V II", + "Four th", + "Ġexplore rs", + "W ER", + "Not able", + "Bro ok", + "m ens", + "F aith", + "-------- -", + "ĠJ ou", + "¬ ¼", + "Ġpine apple", + "Ġam alg", + "el n", + "ark able", + "ĠãĤµ ãĥ¼ãĥĨãĤ£", + "ĠãĤµãĥ¼ãĥĨãĤ£ ãĥ¯ãĥ³", + "Ġov arian", + "ĠE choes", + "Ġhairc ut", + "Ġp av", + "Ġch illed", + "anas ia", + "Ġsty led", + "Ġd ab", + "ni per", + "Ġminister ial", + "ĠD UP", + "T an", + "Ġsul ph", + "ĠD eter", + "ĠBo hem", + "od an", + "Ġeduc ator", + "â ĵĺ", + "sp ir", + "Ch icken", + "ĠE leanor", + "Ġqu i", + "Ġheav iest", + "Ġgrasp ed", + "U RA", + "Ġcro oked", + "Jess ica", + "pro blem", + "Ġpred etermined", + "Ġman iac", + "Ġbreath s", + "ĠLauder dale", + "Ġh obbies", + "y z", + "Cr ime", + "Ġcharism a", + "d L", + "Ġle aping", + "Ġk ittens", + "Ang elo", + "ĠJ ACK", + "ĠSu zanne", + "Ġhal ting", + "ENT ION", + "Ġswall owing", + "ĠEarthqu ake", + "Ġeight 
eenth", + "ĠN IC", + "ĠIN F", + "ĠCons cious", + "Ġparticular s", + "circ le", + "7 40", + "Ġbene volent", + "Ġ7 47", + "Ġ4 90", + "Ġr undown", + "ĠVal erie", + "ĠB UR", + "Ġcivil isation", + "ĠS chn", + "W B", + "ot ide", + "intern ational", + "Ġj ohn", + "Ġ19 02", + "Ġpe anuts", + "Ġflav ored", + "k us", + "Ġro ared", + "Ġcut off", + "é £", + "Ġorn ament", + "Ġarchitect ures", + "Ġ3 69", + "ol or", + "ĠWild e", + "ĠC RC", + "ĠAdjust ed", + "Ġprov oking", + "land ish", + "Ġrational ity", + "Ġjust ifies", + "Ġdisp el", + "Ġa meric", + "ĠPol es", + "Ø ©", + "Ġen vis", + "ĠD oodle", + "ä½ ¿", + "igs aw", + "auld ron", + "Techn ical", + "T een", + "up hem", + "ĠX iang", + "Ġdetract ors", + "ĠZ i", + "ĠJournal ists", + "Ġconduc ive", + "ĠVolunte ers", + "Ġs d", + "Know ing", + "Ġtrans missions", + "ĠPL AN", + "ĠL IB", + "Ġall uded", + "Ġob e", + "Ġd ope", + "ĠGold stein", + "Ġwavelength s", + "ĠDest ination", + "nd a", + "ug i", + "Ġattent ive", + "ĠLe an", + "ral tar", + "Ġman g", + "mb uds", + "ak ings", + "b ender", + "Ġacc ol", + "Ġcraw led", + "N OW", + "Min nesota", + "Ġflour ished", + "ĠZ up", + "ĠSuper visor", + "ĠOliv ier", + "Ex cellent", + "Ġwid en", + "D one", + "Ġw ig", + "Ġmiscon ceptions", + "Cor p", + "W an", + "Ġvener able", + "ĠNot ably", + "ĠKling on", + "an imate", + "Bo ost", + "ĠS AY", + "miss ing", + "ibli ography", + "mel on", + "Ġpay day", + "Ø ³", + "bo le", + "Ġve iled", + "ĠAl phabet", + "It alian", + "Ġever lasting", + "ĠR IS", + "ĠC ree", + "rom pt", + "Ġh ating", + "Ġgrin ning", + "Ġge ographically", + "OS H", + "Ġwe eping", + "ĠÂłĠÂłĠÂłĠÂł ĠÂłĠÂłĠÂłĠÂł", + "Ġimpe cc", + "Let ter", + "Ġblo ated", + "PL A", + "ĠFe in", + "Ġper sever", + "Th under", + "Ġa ur", + "ĠR L", + "Ġpit falls", + "âĸ º", + "Ġpredomin ant", + "Ġ5 25", + "7 18", + "AP E", + "7 14", + "Ġfarm land", + "ĠQ iao", + "Ġv iolet", + "ĠBah amas", + "Ġinflic ting", + "ĠE fficiency", + "Ġhome brew", + "Ġundert ook", + "Ġcur ly", + "ĠHard ing", + "man ia", + "59 6", + "Ġtem pered", + "Ġhar rowing", + "ĠP ledge", + "ĠFranken stein", + "è ª", + "M otion", + "Ġpredict ably", + "ĠExpl osion", + "oc using", + "er d", + "col o", + "FF ER", + "Ġback field", + "ĠV IDE", + "ue bl", + "N arr", + "ĠArg ument", + "Ġgen omic", + "Ġbout ique", + "Ġbatt ed", + "ĠB inary", + "Ġg amb", + "ĠRh ythm", + "67 3", + "Ġa float", + "ĠOlymp ia", + "Y ING", + "Ġend if", + "is in", + "Ġwin ters", + "Ġsc attering", + "I v", + "D istance", + "Ġtr u", + "ĠCom fort", + "Ġne xus", + "Ġair flow", + "ĠByz antine", + "p ayers", + "con i", + "ĠB etsy", + "D eal", + "ĠN ug", + "ĠContin ent", + "red ibly", + "Ġoptim izing", + "al beit", + "Ġec static", + "ĠPro to", + "ç ·", + "iv ot", + "âĸ Ħ", + "em p", + "rou nder", + "Ġcl out", + "ĠI ST", + "66 3", + "ĠDoll ars", + "ĠD AC", + "Ġsubsc ribed", + "Ġrehears al", + "Ġam ps", + "ĠSh ang", + "es m", + "Ġspr inkle", + "Ġassail ant", + "ĠO o", + "ĠCoin base", + "T act", + "Ġret ina", + "Ġn uns", + "R ON", + "att o", + "Ġj ug", + "ĠSV G", + "Ġb ikini", + "ĠFI LE", + "ĠFound ers", + "ep ort", + "ĠK P", + "Ġrest ores", + "ĠTh ick", + "Ġash ore", + "Ġappro vals", + "R ender", + "M AG", + "G raham", + "ĠCort ana", + "ãĥ³ ãĤ¸", + "ss h", + "or ians", + "ars ity", + "ĠInsp ired", + "u pper", + "Ġsign alling", + "Ġreb uke", + "Ġfl ares", + "Ġdownt ime", + "Stud ies", + "Ġstagn ation", + "ĠSequ ence", + "Ġgr unt", + "Ġass ures", + "ĠPL A", + "59 2", + "Ġintra ven", + "d epend", + "Sus an", + "ĠManz iel", + "Man ia", + "Cont ract", + "Ġsl ams", + "Ġcult ured", + "Ġcred itor", + "L IST", + "ĠH UM", + "ĠChatt 
anooga", + "serv ed", + "Ġclo aked", + "ĠF TP", + "p owder", + "ĠSt ella", + "uct ive", + "Ġcheap ly", + "ĠMU CH", + "ĠGalile o", + "Ġsu ites", + "spe ech", + "Ġdeliber ations", + "ĠCh ips", + "« ĺ", + "Bal ance", + "ĠWyn ne", + "ĠAk ron", + "Ass et", + "Ġhon oured", + "Ġed ged", + "Like wise", + "anim ous", + "ĠW age", + "ĠEz ek", + "ad vertisement", + "ĠRT X", + "ĠM AD", + "Ġmigr ating", + "ĠS QU", + "Ġ4 75", + "Ed ited", + "Ġshorth and", + "ĠBas ics", + "Ġcro tch", + "ĠEV EN", + "Ġv m", + "effic iency", + "Ġcal ves", + "ĠF rie", + "ĠBrill iant", + "Ġstri kers", + "Ġrepent ance", + "Ġarter ies", + "r l", + "B ed", + "h ap", + "Ġcrypt ography", + "ĠSab res", + "Ġ4 14", + "vi ks", + "ih ara", + "aps es", + "T alking", + "Ġintertw ined", + "Ġdoc ks", + "Ġalle le", + "ĠArt ifact", + "ĠH IM", + "t orn", + "ç ķ", + "Ġop acity", + "ĠE ly", + "os uke", + "Ġn ipple", + "Ġhand written", + "ĠV K", + "ĠChamber lain", + "ĠLa os", + "ig raph", + "g row", + "Ġtr illions", + "Ġdescend ant", + "ĠSail or", + "as uring", + "Ġce ilings", + "ĠWare house", + "f lying", + "ĠGl ow", + "Ġn ont", + "Ġmiscar riage", + "Ġrig s", + "Ġmin istries", + "Ġelabor ated", + "Ġdel usional", + "ĠHum ane", + "Ġ3 79", + "n ets", + "Ġblack out", + "add ers", + "Ġn p", + "ĠT ire", + "ro sc", + "Ġsub div", + "Ġlink age", + "Ġchron ological", + "ĠHER O", + "Ġres ettlement", + "ĠVin yl", + "Ġpast oral", + "ĠMob il", + "ĠBar bar", + "Co oldown", + "ĠF ritz", + "c riminal", + "re pe", + "Ġbell ig", + "ĠBre ed", + "Ġ4 18", + "Ġsem blance", + "ij k", + "Ġcur tail", + "Ġclin ch", + "cont ained", + "ĠProm pt", + "ast on", + "Ġw i", + "Ġpursu its", + "5 15", + "ĠGl oss", + "Ġfl ips", + "Ġcoup ons", + "Ġcl oning", + "ĠLike ly", + "Rem oved", + "ĠQu artz", + "r ices", + "ĠSpe ars", + "Ġp ious", + "Ġdep reciation", + "ĠD are", + "oun ces", + "am az", + "O nt", + "Ġp innacle", + "d ocker", + "0 26", + "ĠW yr", + "ĠPro per", + "Ë Ī", + "n il", + "By tes", + "Ġseek er", + "t rial", + "Ġunf olds", + "ĠMar se", + "Ġextravag ant", + "ĠSurviv ors", + "RED ACTED", + "ĠSpeed way", + "ĠCra igslist", + "sub mit", + "ĠGener ations", + "Ġup holding", + "Ġblood stream", + "ĠMiss ions", + "ĠL awn", + "Ġlim bo", + "ene i", + "H uh", + "ĠWild cats", + "pre p", + "ĠMark us", + "ĠFor bidden", + "rit ic", + "IN O", + "Ġexhib iting", + "requ ent", + "ch uk", + "Ġhabit ual", + "ĠComp atibility", + "Dr ag", + "RIP T", + "uj ah", + "GR OUND", + "Ġdelinqu ent", + "Ġburn er", + "Ġcontempor aries", + "Ġgimm ick", + "load s", + "Ġno zzle", + "p odcast", + "ĠW ak", + "ĠStat en", + "ĠK uh", + "ãģ ĵ", + "inter rupted", + "Ġinv incible", + "ĠBurn ett", + "cig arette", + "ĠPeb ble", + "ĠTem porary", + "ĠMar ino", + "58 2", + "Ġwast eland", + "ident ly", + "T x", + "Ġr ite", + "ĠPan asonic", + "ĠM iddles", + "ĠHort on", + "ae us", + "Ġc uring", + "Ġm ats", + "Ġadj ourn", + "Ġfears ome", + "pe z", + "bo ats", + "Ġpro pell", + "Ġconflic ted", + "ĠAng er", + "Ġinsurg ent", + "K arl", + "Ġco ales", + "Ġsouth western", + "Ġdis su", + "ĠO vert", + "******** ****", + "Ġbox ed", + "ĠBr une", + "aa a", + "Ġgard ening", + "ĠEng el", + "tr acks", + "Ġpur ified", + "Ġplace holder", + "ĠL ikes", + "Ġd an", + "G ab", + "Ġe ct", + "ĠF aw", + "ĠEl iot", + "Ġ' ,", + "otrop ic", + "ĠRu in", + "hed on", + "Ġca ul", + "Ġa ft", + "ĠCad illac", + "gh a", + "ass ian", + "ud eb", + "ĠT ick", + "Ġadjust s", + "AR GET", + "5 37", + "isc he", + "ant y", + "ĠFried rich", + "ĠBl izz", + "ĠA OL", + "Camp aign", + "Ġmamm al", + "ĠVe il", + "ĠK ev", + "ĠMaur it", + "ĠDam ien", + "N ation", + "E astern", + 
"Ġ{ :", + "Ġ= ================================", + "Ġstereotyp ical", + "Ġatt ic", + "ĠCy borg", + "requ ire", + "Ġaward ing", + "ĠPap ua", + "bt n", + "b ent", + "B oo", + "Ġ( =", + "ĠX ander", + "ĠSomers et", + "Ġcatch y", + "Ġcert ify", + "STR UCT", + "Ġit al", + "Ġt ides", + "ĠBr ands", + "G ray", + "comp etitive", + "Ġcur ator", + "ĠD G", + "omin ium", + "ĠGM Os", + "ci ating", + "ĠCarm en", + "ow ard", + "Balt imore", + "Ġr gb", + "C u", + "Ġwip es", + "spe ll", + "IT NESS", + "Ġsummar izes", + "ĠRe vis", + "Ġwhistlebl owers", + "ĠBre ach", + "Ġcro chet", + "k os", + "ews ki", + "Ġrep et", + "Ġcrim son", + "ĠKar achi", + "read able", + "dim ension", + "ĠI gor", + "ild ed", + "ĠZ ed", + "ĠKe ane", + "ĠCos metic", + "DE P", + "Ġretreat ing", + "ĠU A", + "ens ical", + "Ġd usk", + "ĠDick ens", + "Ġaren as", + "ĠPass age", + "level s", + "Ġcur v", + "P ope", + "Ġch ores", + "ĠEl ise", + "ĠComp ass", + "b ub", + "Ġmamm alian", + "ĠSans krit", + "ĠAN C", + "ĠCr ack", + "Q ual", + "L aun", + "amp unk", + "Ġlearn ers", + "Ġglam orous", + "Ġfur the", + "erm ott", + "c and", + "Gener ic", + "Ġnarr ated", + "Ġdisorder ly", + "ĠTrans actions", + "ĠDet ention", + "ĠR oku", + "Ä į", + "Ġunder statement", + "ĠS aur", + "ĠRodrig o", + "ĠAS AP", + "S in", + "Ġre joice", + "Method s", + "Ġelectro de", + "Ġworsh ipped", + "Ġid i", + "ĠPhys icians", + "Ġpop up", + "Ġde ft", + "ĠRem oval", + "ĠBu enos", + "ver bs", + "Ġfun k", + "ush a", + "rict ion", + "ore a", + "ĠBang alore", + "ĠKen obi", + "zz i", + "Ġnorm ative", + "Ġgobl ins", + "Ġcaf es", + "ĠUN CLASSIFIED", + "ĠF ired", + "S IGN", + "Ġs clerosis", + "ĠV oter", + "ĠSon ny", + "ĠExt end", + "ĠEV s", + "Ar senal", + "Ġp si", + "Ġwid est", + "ĠT us", + "Ġlo oms", + "Ġjust ifying", + "ĠGr anger", + "è ¯", + "Ref er", + "58 3", + "Ġflour ishing", + "ab re", + "Ġr ave", + "ĠCont ra", + "Ġ18 98", + "Add s", + "Ġf ul", + "ĠCo oke", + "some one", + "= #", + "67 1", + "Ġy ak", + "Ġar te", + "ĠMis cellaneous", + "ĠDet ection", + "ĠCl ancy", + "â ģ", + "ass ies", + "Ġval iant", + "ĠFemin ist", + "cor ruption", + "V el", + "P ear", + "Ġsucc inct", + "Ġquick est", + "k w", + "Ġsp itting", + "ĠL ibraries", + "åħ ī", + "ant z", + "D ad", + "ĠSpec ifications", + "rup ulous", + "and r", + "RES ULTS", + "Ġsnow ball", + "Ġpred is", + "ĠB axter", + "ĠNurs ing", + "ĠCh aff", + "s we", + "Ġout age", + "Ġnest ing", + "Ġnotor iety", + "tr igger", + "on ite", + "j on", + "Ġf ou", + "ook ed", + "ĠCelebr ity", + "re ality", + "Ġfat ig", + "Ġhug ging", + "Ġbother s", + "ĠPan zer", + "ĠCh andra", + "fig ured", + "Ġvol ts", + "ĠCloud s", + "Ġfee ble", + "ĠCur ve", + "ĠAs us", + "78 6", + "abs or", + "ĠV ICE", + "ĠH ess", + "Ġmanufact ures", + "Ġgri zz", + "ĠPower ful", + "ac id", + "Ġsub sections", + "ĠKrug man", + "ĠAl ps", + "is u", + "Ġsequ est", + "ĠUlt ron", + "ĠT inker", + "ĠGo ose", + "Ġmism atch", + "Att orney", + "Ġmorph ology", + "ĠSix ers", + "ut tered", + "ĠE LECT", + "gr an", + "Rus sell", + "ĠG SL", + "Ġfort night", + "Ġ. 
)", + "Ġapost le", + "pr one", + "el ist", + "Unt itled", + "ĠIm plementation", + "ist ors", + "Ġtank er", + "Ġpl ush", + "Ġattend ants", + "ĠT ik", + "ĠGreen wich", + "ĠY on", + "ĠSP L", + "cell s", + "unt led", + "S olution", + "ĠQu é", + "Ġvac ated", + "Ġupt ick", + "ĠMer idian", + "æ ĥ", + "ĠDr ill", + "9 25", + "58 4", + "Ġrenov ated", + "ĠKub rick", + "zy k", + "Ġl ousy", + "pp el", + "ohyd rate", + "ĠI zzy", + "lesi astical", + "CC C", + "ĠAj ax", + "Ġad apters", + "ĠPetra eus", + "Ġaffirm ation", + "ĠST OR", + "le ms", + "ad oes", + "ĠConstantin ople", + "Ġp onies", + "Ġl ighthouse", + "Ġadherent s", + "ĠBre es", + "omorph ic", + "Fight ing", + "Ġpl aster", + "ĠP VC", + "ĠOb st", + "Ġdear ly", + "ĠTo oth", + "icks on", + "Ġsh aming", + "P lex", + "A gg", + "ĠâĢ¦ \"", + "Ġsub reddits", + "Ġpige on", + "ĠResident ial", + "ĠPass ing", + "Ġl um", + "ĠP ension", + "Ġpessim istic", + "Ġ4 32", + "z inski", + "c ade", + "0 75", + "Ġapolog ised", + "iy ah", + "Put ting", + "Ġgloom y", + "ĠLy me", + "=-=-=-=- =-=-=-=-", + "ĠT ome", + "ĠPsych iatric", + "ĠH IT", + "c ms", + "ap olog", + "Ġbreak er", + "Ġdeep en", + "Ġtheor ist", + "ĠHigh lands", + "Ġb aker", + "Ġst aples", + "Ġinterf ered", + "ĠAb ortion", + "jo ined", + "ch u", + "Ġform ulate", + "Ġvacc inations", + "Ġban ter", + "phe us", + "Ġoutfield er", + "ĠM eter", + "Ġ# ####", + "Ġ18 95", + "Ġnarrow ing", + "ĠST ORY", + "f p", + "ĠC ST", + "ign ore", + "Ġproclaim ing", + "ĠR U", + "ĠB ALL", + "yn a", + "65 3", + "Ġpos it", + "P RE", + "59 4", + "ĠRegist rar", + "ĠPil grim", + "ic io", + "Ġpre tt", + "Ġlif eless", + "Ġ__ _", + "Ne igh", + "ĠCh urches", + "orn o", + "Ġor cs", + "Ġkind red", + "ĠAud it", + "Ġmillenn ial", + "ĠPers ia", + "g ravity", + "ĠDis ability", + "ĠD ARK", + "W s", + "od on", + "Ġgrand daughter", + "ĠBro oke", + "ĠA DA", + "ER A", + "Ġpick ups", + "ĠWil kinson", + "ĠSh ards", + "ĠN K", + "Ġexp el", + "ĠKis lyak", + "Ġj argon", + "Ġpolar ized", + "ian e", + "Pub lisher", + "Ġreb utt", + "Ġapprehens ion", + "ĠK essler", + "Ġpr ism", + "F UL", + "19 64", + "ĠL oll", + "ä ¿", + "le thal", + "Å Ł", + "Ġg hetto", + "Ġb oulder", + "ĠSlow ly", + "ĠOsc ars", + "ĠInst ruction", + "ĠUl tr", + "ĠM oe", + "N ich", + "ĠP ATH", + "( *", + "ĠRE LEASE", + "un ing", + "rou se", + "en eg", + "Ġre imb", + "ĠDet ected", + "Do S", + "Ġster ling", + "Ġaggreg ation", + "ĠLone ly", + "ĠAtt end", + "hig her", + "Ġairst rike", + "ks on", + "SE LECT", + "Ġdef lation", + "ĠHer rera", + "C ole", + "rit ch", + "Ġadvis able", + "F ax", + "Ġwork around", + "Ġp id", + "mort em", + "ers en", + "Ġtyp o", + "Ġal um", + "78 2", + "ĠJam al", + "script s", + "Ġcapt ives", + "ĠPres ence", + "ĠLie berman", + "angel o", + "Ġalcohol ism", + "ass i", + "Ġrec ite", + "Ġgap ing", + "Ġbask ets", + "ĠG ou", + "Brow ser", + "ne au", + "Ġcorrect ive", + "und a", + "sc oring", + "ĠX D", + "Ġfil ament", + "Ġdeep ening", + "ĠStain less", + "Int eger", + "Ġbu ggy", + "Ġten ancy", + "ĠMub arak", + "Ġt uple", + "ĠD roid", + "ĠS itting", + "Ġforfe it", + "ĠRasm ussen", + "ixt ies", + "es i", + "ĠKim mel", + "Ġmetic ulously", + "Ġap opt", + "ĠS eller", + "08 8", + "ec ake", + "hem atically", + "T N", + "Ġmind less", + "Ġdig s", + "ĠAcc ord", + "ons ense", + "em ing", + "br ace", + "Ġe Book", + "ĠDist ribut", + "ĠInvest ments", + "w t", + "] ),", + "beh avior", + "56 3", + "Ġbl inding", + "ĠPro testers", + "top ia", + "Ġreb orn", + "ĠKel vin", + "ĠDo ver", + "ĠD airy", + "ĠOut s", + "Ġ[ /", + "Ï Ģ", + "b p", + "ĠVan ity", + "ĠRec ap", + "ĠHOU SE", + "ĠF ACE", + "Ġ4 22", + 
"69 2", + "ĠAnt ioch", + "cook ed", + "Ġcoll ide", + "Ġa pr", + "Ġsle eper", + "ĠJar vis", + "Ġalternative ly", + "ĠLe aves", + "ĠM aw", + "Ġantiqu ity", + "ĠAdin ida", + "Ġab user", + "Poké mon", + "Ġass orted", + "ĠRev ision", + "ĠP iano", + "ĠG ideon", + "O cean", + "Ġsal on", + "Ġbust ling", + "ogn itive", + "ĠRah man", + "Ġwa iter", + "Ġpres ets", + "ĠO sh", + "ĠG HC", + "oper ator", + "Ġrept iles", + "Ġ4 13", + "ĠG arr", + "ĠCh ak", + "Ġhas hes", + "Ġfail ings", + "Ġfolk lore", + "Ġab l", + "ĠC ena", + "ĠMac Arthur", + "ĠCOUR T", + "Ġperipher y", + "app ers", + "Ġreck oned", + "ĠInf lu", + "ĠC ET", + "Ġ3 72", + "ĠDefin itive", + "ass ault", + "4 21", + "Ġreservoir s", + "Ġd ives", + "ĠCo il", + "DA Q", + "Ġvivid ly", + "ĠR J", + "ĠBel lev", + "Ġec lectic", + "ĠShow down", + "ĠK M", + "ip ed", + "reet ings", + "ĠAs uka", + "L iberal", + "ĠÏ Ħ", + "Ġbystand ers", + "ĠGood win", + "uk ong", + "S it", + "ĠT rem", + "Ġcrim inally", + "ĠCirc us", + "ch rome", + "88 7", + "Ġnan op", + "ĠOb i", + "ĠL OW", + "o gh", + "ĠAuth ors", + "ob yl", + "Ur ban", + "Ġt i", + "ĠWe ir", + "t rap", + "ag y", + "Ġparent heses", + "Ġout numbered", + "Ġcounter productive", + "ĠTob ias", + "ub is", + "P arser", + "ST AR", + "Ġsyn aptic", + "ĠG ears", + "Ġh iber", + "Ġdebunk ed", + "Ġex alted", + "aw atts", + "H OU", + "Ch urch", + "ĠPix ie", + "ĠU ri", + "ĠForm ation", + "ĠPred iction", + "C EO", + "Ġthro tt", + "ĠBrit ann", + "ĠMad agascar", + "ë ĭ", + "Ġbill boards", + "ĠRPG s", + "ĠBe es", + "complete ly", + "F IL", + "Ġdoes nt", + "ĠGreen berg", + "re ys", + "Ġsl ing", + "Ġempt ied", + "ĠPix ar", + "ĠDh arma", + "l uck", + "ingu ished", + "Ġend ot", + "Ġbab ys", + "05 9", + "che st", + "r ats", + "Ġr idden", + "Ġbeet les", + "Ġillum inating", + "Ġfict itious", + "ĠProv incial", + "Ġ7 68", + "Ġshe pherd", + "ĠR ender", + "Ġ18 96", + "C rew", + "Ġmold ed", + "ĠXia omi", + "ĠSp iral", + "Ġdel im", + "Ġorgan ising", + "Ġho ops", + "ĠBe i", + "z hen", + "Ġfuck in", + "Ġdec ad", + "Ġun biased", + "am my", + "sw ing", + "Ġsmugg led", + "Ġk ios", + "ĠP ERSON", + "ĠInquis itor", + "Ġsnow y", + "Ġscrap ing", + "ĠBurg ess", + "P tr", + "ag ame", + "R W", + "Ġdro id", + "ĠL ys", + "ĠCass andra", + "Jac ob", + "Ġ35 4", + "Ġpast ure", + "Ġfr anc", + "ĠScot ch", + "ĠEnd s", + "ĠI GF", + "def inition", + "Ġhyster ical", + "ĠBrown e", + "77 1", + "Ġmobil ization", + "æ ķ", + "iqu eness", + "Th or", + "Ġspear headed", + "Ġembro iled", + "Ġconject ure", + "jud icial", + "Ch oice", + "Ġpaper back", + "P ir", + "Ġrec overs", + "ĠSur ge", + "ĠSh ogun", + "ĠPed iatrics", + "ãģ ł", + "Ġsweep s", + "ĠLabor atories", + "ĠP acks", + "al us", + "add in", + "Ġhead lights", + "g ra", + "Ev idence", + "COL OR", + "Ad min", + "Ĭ ±", + "Ġconco ct", + "s ufficient", + "Ġun marked", + "Ġrich ness", + "Ġdiss ertation", + "Ġseason ing", + "Ġg ib", + "ĠM ages", + "un ctions", + "ĠN id", + "che at", + "ĠTM Z", + "c itizens", + "ĠCatholic ism", + "n b", + "Ġdisemb ark", + "ĠPROG RAM", + "a ques", + "Ty ler", + "Or g", + "ĠSl ay", + "ĠN ero", + "ĠTown send", + "IN TON", + "te le", + "Ġmes mer", + "9 01", + "Ġfire ball", + "ev idence", + "aff iliated", + "ĠFrench man", + "ĠAugust a", + "0 21", + "Ġs led", + "Ġre used", + "ĠImmun ity", + "Ġwrest le", + "assemb led", + "Mar ia", + "Ġgun shots", + "ĠBarb ie", + "Ġcannabin oids", + "ĠTo ast", + "ĠK inder", + "IR D", + "Ġre juven", + "Ġg ore", + "Ġrupt ure", + "Ġbre aching", + "ĠCart oon", + "Ġ4 55", + "ĠPale o", + "6 14", + "Ġspe ars", + "ĠAm es", + "ab us", + "Mad ison", + "GR OUP", + "Ġab orted", + 
"y ah", + "Ġfel on", + "Ġcaus ation", + "Ġprep aid", + "Ġp itted", + "op lan", + "ĠShel ley", + "ĠRus so", + "ĠP agan", + "Ġwill fully", + "ĠCan aver", + "und rum", + "ĠSal ary", + "ĠAr paio", + "read er", + "ĠR ational", + "ĠOver se", + "ĠCa uses", + "Ġ* .", + "Ġw ob", + "Ke ith", + "ĠCons ent", + "man ac", + "77 3", + "6 23", + "Ġfate ful", + "et imes", + "Ġspir ited", + "ĠD ys", + "Ġhe gemony", + "Ġboy cot", + "ĠEn rique", + "em outh", + "Ġtim elines", + "ĠSah ara", + "ĠRel ax", + "ĠQuin cy", + "ĠLess ons", + "ĠE QU", + "SE A", + "N K", + "ĠCost co", + "Incre ase", + "Ġmotiv ating", + "ĠCh ong", + "am aru", + "ĠDiv ide", + "Ġped igree", + "ĠTasman ia", + "ĠPrel ude", + "L as", + "9 40", + "57 4", + "Ġch au", + "ĠSp iegel", + "un ic", + "-- >", + "ĠPhil ips", + "ĠKaf ka", + "Ġuphe aval", + "Ġsent imental", + "Ġsa x", + "ĠAk ira", + "ser ial", + "Mat rix", + "Ġelect ing", + "Ġcomment er", + "ĠNeb ula", + "ple ts", + "ĠNad u", + "ĠAd ren", + "Ġen shr", + "ĠR AND", + "fin ancial", + "ĠCly de", + "uther ford", + "Ġsign age", + "Ġde line", + "Ġphosph ate", + "rovers ial", + "f ascist", + "ĠV all", + "ĠBeth lehem", + "Ġfor s", + "Ġeng lish", + "S olid", + "N ature", + "Ġv a", + "ĠGu ests", + "Ġtant al", + "Ġauto immune", + ";;;;;;;; ;;;;", + "ĠTot ally", + "ĠO v", + "Ġdef ences", + "ĠCoc onut", + "Ġtranqu il", + "Ġpl oy", + "Ġflav ours", + "ĠFl ask", + "ãĤ¨ ãĥ«", + "ĠWest on", + "ĠVol vo", + "8 70", + "Ġmicro phones", + "ver bal", + "R PG", + "Ġi ii", + "; }", + "0 28", + "Ġhead lined", + "Ġprim ed", + "Ġho ard", + "ĠSh ad", + "ĠEN TER", + "Ġtri angular", + "Ġcap it", + "l ik", + "ĠAn cients", + "Ġl ash", + "Ġconv ol", + "Ġcolon el", + "en emy", + "G ra", + "Ġpub s", + "ut ters", + "Ġassign s", + "ĠPen et", + "ĠMon strous", + "ĠBow en", + "il ver", + "H aunted", + "ĠD ing", + "start ed", + "pl in", + "Ġcontamin ants", + "ĠDO E", + "ff en", + "ĠTechn ician", + "R y", + "Ġrob bers", + "Ġhot line", + "ĠGuard iola", + "ĠKau fman", + "row er", + "ĠDres den", + "ĠAl pine", + "E lf", + "Ġf mt", + "ĠS ard", + "urs es", + "g pu", + "Un ix", + "Ġunequiv ocally", + "ĠCitizens hip", + "qu ad", + "m ire", + "ĠS weeney", + "B attery", + "6 15", + "Ġpanc akes", + "Ġo ats", + "M aps", + "ĠCont rast", + "mbuds man", + "ĠE PS", + "Ġsub committee", + "Ġsour cing", + "Ġs izing", + "ĠBuff er", + "ĠMand atory", + "Ġmoder ates", + "ĠPattern s", + "ĠCh ocobo", + "ĠZ an", + "ĠSTAT ES", + "ĠJud ging", + "ĠIn her", + "* :", + "Ġb il", + "ĠY en", + "Ġexh ilar", + "oll ower", + "z ers", + "Ġsn ug", + "max imum", + "Ġdesp icable", + "ĠP ACK", + "ĠAn nex", + "Ġsarcast ic", + "Ġlate x", + "Ġt amp", + "ĠS ao", + "b ah", + "ĠRe verend", + "ĠChin atown", + "ĠA UT", + "d ocumented", + "ĠGA BA", + "ĠCan aan", + "ĠÙ ħ", + "Ġgovern s", + "pre v", + "E sc", + "ĠEst imates", + "OS P", + "Ġendeav our", + "ĠCl osing", + "omet ime", + "every one", + "Ġwor sen", + "Ġsc anners", + "Ġdev iations", + "ĠRobot ics", + "ĠCom pton", + "Ġsorce rer", + "Ġend ogenous", + "Ġem ulation", + "ĠPier cing", + "ĠA ph", + "ĠS ocket", + "Ġb ould", + "ĠO U", + "ĠBorder lands", + "Ġ18 63", + "G ordon", + "ĠW TO", + "Ġrestrict s", + "Ġmosa ic", + "Ġmel odies", + "ç Ħ", + "T ar", + "Ġdis son", + "ĠProv ides", + "Ġ ......", + "b ek", + "F IX", + "Ġbro om", + "ans hip", + "Do ctors", + "Ġner ds", + "ĠReg ions", + "na issance", + "Ġmet e", + "Ġcre pt", + "pl ings", + "Ġgirlfriend s", + "kn it", + "ig ent", + "ow e", + "Ġus hered", + "ĠB az", + "M obil", + "4 34", + "ĠPres ents", + "orig in", + "Ġins omnia", + "ĠA ux", + "4 39", + "ĠCh ili", + "irs ch", + "G AME", 
+ "Ġgest ation", + "alg ia", + "rom ising", + "$ ,", + "c row", + "ĠIn spection", + "at omic", + "Rel ations", + "J OHN", + "rom an", + "ĠClock work", + "ĠBak r", + "m one", + "M ET", + "Ġthirst y", + "Ġb c", + "Ġfacult ies", + "R um", + "Ġnu ance", + "ĠD arius", + "ple ting", + "fter s", + "etch up", + "Reg istration", + "ĠK E", + "R ah", + "Ġpref erential", + "ĠL ash", + "ĠH H", + "Val id", + "ĠN AV", + "Ġstar ve", + "ĠG ong", + "z ynski", + "ĠAct ress", + "Ġw ik", + "Ġun accompanied", + "lv l", + "Br ide", + "AD S", + "ĠCommand o", + "ĠVaugh n", + "Wal let", + "Ġho pping", + "ĠV ie", + "Ġcave ats", + "Ġal as", + "if led", + "ab use", + "66 1", + "Ġib n", + "Ġg ul", + "Ġrob bing", + "t il", + "IL A", + "Ġmit igating", + "Ġapt ly", + "Ġty rant", + "Ġmid day", + "ĠGil more", + "ĠDe cker", + "Ġ§ §", + "part ial", + "Ex actly", + "Ġphen otype", + "Ġ[+ ]", + "ĠP lex", + "ĠI ps", + "vers ions", + "Ġe book", + "Ġch ic", + "g ross", + "\":\" \"},{\"", + "ĠSur prisingly", + "M organ", + "Ġresid ues", + "ĠConf ederation", + "in feld", + "Ġl yr", + "mod erate", + "Ġperpend icular", + "V K", + "Ġsynchron ized", + "Ġrefres hed", + "Ġad ore", + "ĠTor ment", + "ol ina", + "Ġ26 00", + "Item Tracker", + "Ġp ies", + "ĠF AT", + "ĠR HP", + "0 48", + "ĠRES P", + "ĠB J", + "all ows", + "P and", + "Ġunw elcome", + "ĠV oc", + "ĠBast ard", + "ĠO W", + "ĠL AR", + "ĠHeal er", + "Environment al", + "ĠKen yan", + "ĠTr ance", + "ĠP ats", + "Ġali ases", + "ĠGar field", + "Ġcampaign er", + "Ġadvance ments", + "ĠOkin awa", + "ĠC oh", + "ows ky", + "Ġstar ved", + "Ġsize able", + "Ġ: -)", + "Ġm RNA", + "Ġsusp ensions", + "ist ar", + "Scot land", + "Pr in", + "-------------------------------- ----------------", + "Ġ50 2", + "Ġteasp oons", + "Ġ10 50", + "Ġcoerc ive", + "ĠMason ic", + "edd ed", + "ĠPass enger", + "Ġl att", + "Ġbr aces", + "ĠSt eal", + "ĠNY T", + "ĠK ats", + "ĠCel est", + "ae z", + "T u", + "ĠCoul ter", + "ðŁ ĺ", + "Fl ickr", + "ĠWil mington", + "ith s", + "++ ;", + "Ġv ending", + "Ġneg ro", + "ĠPh i", + "ĠYellow stone", + "Call back", + "Ġsh ampoo", + "ĠSh ades", + "w at", + "Ġsuper human", + "Ġridic uled", + "Ġhol iest", + "om bo", + "Ġintern s", + "Ġh one", + "ĠPar agu", + "UR I", + "Ġd angling", + "ãĤ »", + "so v", + "ict ional", + "av ailability", + "Ġrev ocation", + "Ġd ow", + "in ic", + "ĠTHE IR", + "Ġis o", + "Ġout ings", + "ĠLeth al", + "Ġ) ))", + "Ġinacc ur", + "Ġout landish", + "Ġan us", + "let ico", + "id on", + "l ol", + "Ġun regulated", + "Ġsuccumb ed", + "Ġc uff", + "ĠWast eland", + "let al", + "Ġsub str", + "Ġcoff ers", + "Ġautom akers", + "ov i", + "ĠX ue", + "ĠDayton a", + "Ġjar ring", + "Ġf umes", + "Ġdisband ed", + "z ik", + "itt on", + "Ġstriking ly", + "Ġsp ores", + "Ad apter", + ".) 
:", + "ĠLynd on", + "ival ry", + "Ġor ally", + "Ġtumult uous", + "Ġdisple asure", + "Ġcon es", + "or rect", + "Ġappe ase", + "Ġder by", + "ĠTrip oli", + "ĠAl ess", + "Ġp oked", + "ĠGu ilty", + "v P", + "En ough", + "Ġorig inals", + "6 99", + "Ġrabb i", + "Ġproverb ial", + "Ġpostp one", + "el ope", + "ĠMist y", + "Ġstaff ed", + "ĠUn employment", + "redit ary", + "Ġdilig ent", + "re comm", + "me asures", + "as in", + "8 25", + "Ġpond s", + "Ġmm ol", + "ĠS AR", + "ĠC ARE", + "Ġ3 71", + "Ġclen ched", + "ĠCors air", + "Ġcaric ature", + "z n", + "att ach", + "ĠSch ro", + "spe ak", + "p ainted", + "ĠS uc", + "ĠE NT", + "Ġcell ul", + "ĠP aid", + "di agn", + "WH ERE", + "Ġtext ed", + "B arn", + "Ġret racted", + "ĠRe ferred", + "S av", + "Ġup keep", + "Ġwork places", + "ĠTok ens", + "Ġampl ify", + "cl inical", + "Ġmult ic", + "mber g", + "Ġconvol uted", + "Reg ion", + "5 65", + "ĠTop ic", + "Ġsn ail", + "Ġsal ine", + "Ġins urrection", + "ĠPet r", + "f orts", + "B AT", + "ĠNav ajo", + "Ġrud imentary", + "ĠLak sh", + "OND ON", + "Me asure", + "Ġtransform er", + "ĠGodd ard", + "Ġcoinc ides", + "ir in", + "R ex", + "ĠB ok", + "qu it", + "Ġshotgun s", + "Ġprolet arian", + "Ġsc orp", + "ĠAd a", + "5 14", + "Ġsl ander", + "record ed", + "Ġemb ell", + "ris ome", + "Ġapolog izing", + "ĠMul cair", + "ĠGib raltar", + "Cl a", + "Ġall ot", + "ĠAtt ention", + "Ġ4 33", + "le ave", + "Ġwh ine", + "ĠIss a", + "ĠFa ust", + "ĠBar ron", + "hen y", + "Ġvictim ized", + "J ews", + "Ġnurt uring", + "ett el", + "W inged", + "ĠSub tle", + "Ġflavor ful", + "ĠRep s", + "eng ed", + "call back", + "Ġdirection al", + "Ġcl asp", + "ĠDirect ions", + "plan et", + "icult ure", + "Hel per", + "ic ion", + "ac ia", + "Ġç ¥ŀ", + "Ġsur ges", + "Ġcan oe", + "ĠPrem iership", + "be en", + "Ġdef ied", + "ĠTro oper", + "Ġtrip od", + "Ġgas p", + "ĠE uph", + "ĠAd s", + "vern ight", + "high ly", + "R ole", + "Ġent angled", + "ĠZe it", + "6 18", + "ĠRust y", + "Ġhaven s", + "ĠVaugh an", + "HA EL", + "ĠSER VICE", + "/ ,", + "Ġstr icken", + "Ġdel usions", + "Ġb is", + "ĠH af", + "Ġgrat ification", + "Ġent icing", + "UN CH", + "Ad ams", + "ĠOL ED", + "ĠBeet le", + "Ġ18 99", + "ĠSO FTWARE", + "ateg or", + "V L", + "ĠTot em", + "ĠG ators", + "AT URES", + "Ġimped ance", + "Reg istered", + "ĠC ary", + "ĠAer ial", + "on ne", + "en ium", + "Ġd red", + "ĠBe g", + "Ġconcurrent ly", + "Ġsuper power", + "ĠX an", + "j ew", + "imes ter", + "ĠDick inson", + "âĶ ģ", + "F la", + "Ġp ree", + "ĠRoll ins", + "© ¶æ", + "Ġden omination", + "ĠL ana", + "5 16", + "Ġinc iting", + "sc ribed", + "j uries", + "ĠWond ers", + "app roximately", + "Ġsusp ending", + "Ġmountain ous", + "ĠL augh", + "oid al", + "N s", + "Det ect", + ") =", + "ĠL uthor", + "ĠSchwarz enegger", + "ĠMull er", + "ĠDev i", + "ec ycle", + "J ar", + "6 13", + "ĠL ongh", + "B ah", + "ĠSP ORTS", + "n w", + "Ġref inement", + "Ġwater ways", + "Ġd iner", + "Bl ade", + "68 3", + "F ac", + "Ġinitial s", + "Ġro g", + "Ġparan ormal", + "B UT", + "Ġ[ (", + "ĠSw anson", + "ĠM esh", + "âĸ ¬", + "Impro ve", + "ĠRad iation", + "ĠEst her", + "ĠE sk", + "ĠA ly", + "ik y", + "Ġir rad", + "ĠBuck ingham", + "Ġref ill", + "Ġ. 
_", + "Re pe", + "CON CLUS", + "Ġdifferent iated", + "Ġchi rop", + "ĠAt kins", + "Pat tern", + "Ġexc ise", + "Ġcab al", + "N SA", + "ĠST A", + "ĠS IL", + "ĠPar aly", + "Ġr ye", + "ĠHow ell", + "ĠCount down", + "ness es", + "alys ed", + "Ġres ize", + "ãĤ ½", + "Ġbudget ary", + "ĠStr as", + "w ang", + "Ġap iece", + "Ġprecinct s", + "Ġpe ach", + "Ġsky line", + "Ġ35 3", + "pop ular", + "App earances", + "ĠMechan ics", + "ĠDev Online", + "S ullivan", + "Z en", + "Ġp u", + "op olis", + "5 44", + "Ġde form", + "Ġcounter act", + "ĠL ange", + "Ġ4 17", + "Con sole", + "77 4", + "Ġnodd ing", + "Ġpopul ism", + "Ġhe p", + "Ġcoun selling", + "compl iance", + "U FF", + "Ġunden iably", + "Ġrail ing", + "ĠHor owitz", + "ĠSim one", + "ĠBung ie", + "Ġa k", + "ĠTal ks", + "x ff", + "fl ake", + "Cr ash", + "Ġsweat y", + "Ġban quet", + "ĠOFF IC", + "Ġinvent ive", + "Ġastron omer", + "ĠStam ford", + "ĠSc are", + "ĠGRE EN", + "olic ited", + "Ġr usher", + "Ġcent rist", + "ight ing", + "Ġsub class", + "Ġdis av", + "Ġdef und", + "ĠN anto", + "oci ate", + "m ast", + "Ġpac if", + "Ġm end", + "e ers", + "imm igration", + "ESS ION", + "Ġnumber ing", + "Ġlaugh able", + "ĠEnd ed", + "v iation", + "em ark", + "P itt", + "Ġmetic ulous", + "ĠL F", + "Ġcongrat ulated", + "ĠBir ch", + "Ġsway ed", + "Ġsemif inals", + "Ġhum ankind", + "m atter", + "ĠEqu ip", + "opa usal", + "S aid", + "ĠLay out", + "Ġvo icing", + "Ġth ug", + "Ġporn ographic", + "I PS", + "Ġmo aning", + "Ġgriev ance", + "Ġconf essions", + "esc al", + "TEXT URE", + "Aut hent", + "os aurus", + "P urchase", + "Ġreleg ation", + "al ter", + "ĠÂł Âł", + "Ġr iddled", + "Ġo gre", + "ĠLow ell", + "Occ up", + "E at", + "ĠHy der", + "ĠAdvis er", + "Com merce", + "H unt", + "ĠOr th", + "ĠComp etitive", + "ĠCL A", + "CD C", + "Ġsal ads", + "F le", + "Ġindustrial ized", + "` ,", + "ĠO WN", + "Ġbec k", + "ĠPart icularly", + "oub t", + "Ġm M", + "ĠHuss ain", + "ĠChen nai", + "Ġ9 20", + "Ġappoint ing", + "ĠCull en", + ",,,, ,,,,", + "Ġp ores", + "ver ified", + "Ġbi ochemical", + "em ate", + "Ġcoward ly", + "ĠHels inki", + "ĠEthiop ian", + "S OURCE", + "ER C", + "est ro", + "Ġbi otech", + "ĠS our", + "Ġbrew er", + "Bloom berg", + "Ġintens ify", + "Gl ass", + "an co", + "ĠF DR", + "gre SQL", + "ĠF ires", + "©¶æ ¥µ", + "ec o", + "100 1", + "ĠHom eless", + "Ġinstant aneous", + "ĠH aste", + "ig el", + "D iamond", + "Ġp aving", + "Ġland fill", + "Ġd ads", + "h oun", + ": ]", + "Ġinc endiary", + "ĠLiving ston", + "ĠHil bert", + "ĠChe cks", + "st yles", + "in ators", + "ĠCl ive", + "ph rine", + "Ġchimpan zees", + "Ġp all", + "ĠJ M", + "ĠAad haar", + "ð Ŀ", + "Ġachie vable", + "dis abled", + "P ET", + "OOOO OOOO", + "M ot", + "Ġint angible", + "Ġbal let", + "ĠWe bs", + "ĠEst imated", + "Effect s", + "Ġb ailed", + "Josh ua", + "Ġturb ulence", + "Ġoccup ant", + "ĠDay light", + "Ġ36 1", + "me et", + "Ġstat ically", + "Ġon look", + "Ġk i", + "il legal", + "Ġvel vet", + "Ġdehyd ration", + "Ġacqu ies", + "ĠRe z", + "ak ura", + "ĠU pton", + "at ro", + "Ġincomp rehensible", + "Ġback door", + "ĠRh ino", + "7 27", + "Ġmath s", + ") +", + "Ġhe resy", + "Ġd f", + "ĠRoc he", + "ĠL ydia", + "Ġpanc reat", + "re ply", + "arre ll", + "Ġsolicit ation", + "Ġcirc adian", + "BI P", + "Ġfor ay", + "Ġcrypt ic", + "iz u", + "ime o", + "ĠTom ato", + "ĠH oms", + "ex amination", + "Ġqu arry", + "ĠVal iant", + "ĠJer icho", + "ĠIN CLUD", + "Ġ18 40", + "5 19", + "Ġres ists", + "Ġsnap shots", + "ĠSp ur", + "ĠAnt iqu", + "Log in", + "Ġbest selling", + "Ġant ic", + "ĠS utherland", + "ãĤ¢ ãĥ«", + "Ġ~ /", + "ĠP arm", + "è 
ĥ", + "P ages", + "int ensity", + "Ġimm obil", + "Ġ18 65", + "zz o", + "Ġn ifty", + "Ġf entanyl", + "ĠPres ervation", + "op hen", + "Ġd arts", + "ĠD inosaur", + "po inters", + "ĠR ite", + "s uggest", + "aware ness", + "ĠSher idan", + "Ġst ances", + "Ġsor cery", + "Ġper jury", + "ĠNik ola", + "ie ver", + "Ġf iance", + "ĠJordan ian", + "ĠBall oon", + "Ġn ab", + "Ġk b", + "Ġhuman ities", + "ĠTan aka", + "hill ary", + "Ġconsult ancy", + "ĠZ ub", + "Ġrem ission", + "Ġconf id", + "CH Q", + "ĠF ug", + "Ġimpro vis", + "Y ep", + "/ _", + "Ġunwilling ness", + "Ġport folios", + "05 5", + "ĠInstruct or", + "aim an", + "Ġclaim ants", + "M bps", + "ĠBy e", + "re ceived", + "T weet", + "Ġind emn", + "ri z", + "am ara", + "N at", + "Ġeval uates", + "ĠL ur", + "ep ad", + "FO X", + "ĠTh ro", + "Ġrust y", + "Ġbed rock", + "ĠOp rah", + "J B", + "Ġmanip ulative", + "Ġwill ful", + "Ġrel apse", + "Ġext ant", + "The me", + "S ensor", + "ĠSt ability", + "go vern", + "Ġpo ppy", + "Ġkn ack", + "Ġins ulated", + "ĠT ile", + "ĠExt rem", + "Ġunt old", + "Ġconver ge", + "Ġref uel", + "ig roup", + "Ġdistort ions", + "Ġrav aged", + "Ġmechan ically", + "ĠRe illy", + "ĠN ose", + "ĠIncarn ation", + "ĠBeck y", + "abb ling", + "Ġt aco", + "Ġr ake", + "Ġmelanch oly", + "Ġillust rious", + "ĠDart mouth", + "Gu ide", + "ĠR azer", + "ĠBen z", + "Ult imate", + "ĠSur prise", + "Ġpage ant", + "off er", + "Who ever", + "Ġw iser", + "Ġchem ist", + "ĠHE LL", + "ĠBul k", + "Ġpl utonium", + "ĠCO VER", + "Ö ¼", + "f ailed", + "Ġtire lessly", + "Ġinf ertility", + "ĠTr ident", + "ĠShow time", + "ĠC iv", + "V ice", + "requ ires", + "itt ance", + "Ġun controlled", + "interest ing", + "56 1", + "Ġinnov ate", + "ateg ic", + "L ie", + "ĠS elling", + "U l", + "Ġsav ior", + "ĠT osh", + "Ġsw ast", + "P ASS", + "Ġr ink", + "Ġcard io", + "ĠI ro", + "ud i", + "Ġv antage", + "Ġv ans", + "ĠNi ño", + "+ =", + "Ġpropag ate", + "< ?", + "Ġmethod ological", + "204 39", + "Ġtrig lycer", + "Ġing rained", + "ĠAn notations", + "arr anted", + "6 17", + "ĠS odium", + "ĠA AC", + "techn ical", + "mult ipl", + "Ġ3 73", + "å ĭ", + "Ġdec isively", + "Ġboost ers", + "Ġdessert s", + "ĠGren ade", + "Ġtest ifying", + "ĠSc ully", + "ID s", + "Ġlock down", + "ĠSc her", + "ĠR é", + "ĠWhit man", + "ĠRams ay", + "rem ote", + "Ġh ikers", + "ĠHy undai", + "Ġcons cientious", + "Ġcler ics", + "ĠSiber ian", + "ut i", + "is bury", + "Ġrel ayed", + "Ġqu artz", + "ĠC BI", + "seek ers", + "ull a", + "Ġweld ing", + "ĠSh al", + "ble acher", + "T ai", + "ĠSam son", + "Ġt umble", + "ĠInvest or", + "Ġsub contract", + "ĠShin ra", + "ow icz", + "j andro", + "d ad", + "Ġtermin ating", + "ĠNe ural", + "ä» £", + "Ġleak age", + "ĠMid lands", + "ĠCaucas us", + "í ķ", + "c it", + "ll an", + "iv ably", + "ĠAlb ion", + "Ġ4 57", + "Ġregist rations", + "Ġcomr ade", + "Ġclip board", + "0 47", + "Ġdiscour aging", + "ĠO ops", + "Ad apt", + "Ġem path", + "n v", + "ĠPR OT", + "ĠDon n", + "ĠP ax", + "ĠB ayer", + "t is", + "Squ are", + "Ġfoot prints", + "part icip", + "ĠChile an", + "B rend", + "ind ucing", + "M agn", + "Ġclub house", + "ĠMagn um", + "Ġenc amp", + "ĠEth nic", + "uch a", + "ere y", + "Ġw atered", + "ĠCal ais", + "Ġcomplex ion", + "Ġsect s", + "Ġren ters", + "Ġbr as", + "oÄŁ an", + "Time out", + "Man agement", + "Ġinf ographic", + "P okemon", + "Cl ar", + "Ġloc ality", + "Ġfl ora", + "as el", + "P ont", + "Ġpop ulate", + "ĠO ng", + "Ġsubs istence", + "Ġa uctions", + "ĠMcA uliffe", + "ĠL OOK", + "br inger", + "Ġtit an", + "Ġmanif old", + "ĠâĹ ı", + "Ġcalibr ated", + "Ġcal iphate", + "ĠSH E", + 
"ĠCommission ers", + "ce ivable", + "j c", + "W inner", + "5 24", + "Ġcond one", + "Other wise", + "Ġp iling", + "Ġem body", + "ĠCrime an", + "ut ics", + "ĠEx hibition", + "Ġ4 26", + "e ering", + "Ġv ying", + "ĠH UGE", + "* =-", + "Ġprin cipled", + "à ¦", + "Ġquir ks", + "ĠEdit ors", + "put ing", + "G ES", + "ĠF TA", + "ठ¾", + "add on", + "ĠH AM", + "ĠFrie za", + "W oman", + ". $", + "Ġc rib", + "ĠHer od", + "Ġtim ers", + "ĠSp aces", + "ĠMac intosh", + "at aka", + "Ġgl ide", + "Ġsmell ing", + "ĠB AL", + "Ġun su", + "Ġcond os", + "Ġbicy cl", + "ĠRev ival", + "55 3", + "Ġjugg ling", + "H ug", + "ĠKardash ian", + "ĠBalk ans", + "mult iple", + "Ġnutrit ious", + "oc ry", + "19 00", + "Ġinteg rates", + "Ġad joining", + "ĠF older", + "roll ment", + "ven ient", + "Ġu ber", + "y i", + "Ġwh iff", + "ĠJu ven", + "ĠB orough", + "net te", + "Ġb ilingual", + "ĠSp arks", + "ph thal", + "man ufact", + "Ġt outing", + "ĠPH I", + "Ke efe", + "Rew ard", + "Ġinf all", + "ĠTem per", + "typ ically", + "ĠNik ol", + "Ġregular s", + "Ġpseud onym", + "Ġexhib itions", + "Ġbl aster", + "Ġ40 9", + "w arming", + "Ġrever ber", + "Ġrecip rocal", + "Ġ6 70", + "ip ient", + "b ett", + "ĠBe gins", + "Ġit ching", + "ĠPh ar", + "Ass uming", + "Ġem itting", + "ĠML G", + "Ġbirth place", + "Ġt aunt", + "ĠL uffy", + "ĠAm it", + "Ġcir cled", + "ĠN ost", + "enn ett", + "Ġde forestation", + "ĠHist orically", + "ĠEvery day", + "Ġovert ake", + "79 2", + "Ġn un", + "ĠLuc ia", + "Ġaccompan ies", + "ĠSe eking", + "ĠTr ash", + "an ism", + "R ogue", + "Ġnorth western", + "ĠSupplement al", + "ĠNY U", + "ĠF RI", + "ĠSat isf", + "x es", + "5 17", + "Ġreass ured", + "Ġspor adic", + "Ġ7 01", + "Ġmed ial", + "Ġcannabin oid", + "Ġbarbar ic", + "Ġep is", + "ĠExplos ive", + "ĠD ough", + "Ġuns olved", + "Support ed", + "Ġacknowled gment", + "sp awn", + "Ġkit chens", + "Ġ- =", + "talk ing", + "ic ist", + "ĠPeg asus", + "ĠPS U", + "Ġphot on", + "ĠAuthent ication", + "R G", + "@# &", + "76 2", + "ĠCl air", + "Ġdi aper", + "Ġbr ist", + "ĠProsecut ors", + "ĠJ em", + "6 28", + "ĠEvery where", + "ĠJean ne", + "equ ality", + "ãĥ© ãĥ³", + "object s", + "ĠPel icans", + "Ġ39 2", + "Ġbl u", + "b ys", + "ĠA go", + "Ġinstruction al", + "Ġdiscrim inating", + "ĠTR AN", + "ĠCorn el", + "ag os", + "Ġty re", + "Ġas piration", + "ĠBrid gewater", + "\": -", + "! 
\".", + "ĠEn s", + "ĠCoc o", + "P ie", + "Ġdet ach", + "ĠC ouch", + "Ġphys ique", + "ĠOccup ations", + "osc opic", + "en ough", + "B uzz", + "App earance", + "Y P", + "Ġrac er", + "Ġcompl icity", + "r pm", + "T oy", + "Ġinterrupt s", + "ĠCat alyst", + "Ġut ilitarian", + "imp act", + "Ġsp aghetti", + "Ġp orous", + "Ġeste emed", + "Ġinc iner", + "ĠI OC", + "7 48", + "Ġesp resso", + "ĠSm ile", + "abil ia", + "6 35", + "Ġmathematic ian", + "Ġ4 24", + "ĠK L", + "ĠH IP", + "Ġover heard", + "ĠT ud", + "ĠT ec", + "Ġqu izz", + "Ġfl attering", + "Ġcon n", + "âĢ İ", + "Ġatt aches", + "ĠR OS", + "ĠAC S", + "Ġt cp", + "ĠSh ame", + "sk ip", + "res pected", + "ĠTrin idad", + "gr ain", + "Ġfooth old", + "ĠUnch arted", + "ĠJul io", + "z l", + "av ored", + "ĠAn xiety", + "er rors", + "ĠCent auri", + "its ch", + "D addy", + "Ġclutch ing", + "ĠIm plement", + "ĠGut ierrez", + "Ġ7 60", + "Ġtele portation", + "end ra", + "Ġrevers ible", + "st ros", + "Ad venture", + "08 3", + "Ġliber ating", + "Ġas phalt", + "ĠSp end", + "AR DS", + "im sy", + "PR ES", + "ĠEmer ging", + "Ġwild fires", + "Ġtechn ologically", + "Ġem its", + "ĠART ICLE", + "Ġirregular ities", + "Ġcher ish", + "çī Ī", + "Ġst ink", + "ĠR ost", + "Econom ic", + "Ġcough ing", + "ĠMcC ann", + "pro perties", + "ilant ro", + "Ġreneg oti", + "Trans lation", + "Ġin quest", + "ĠGra pe", + "oot ers", + "gu i", + "ĠSwords man", + "ace ae", + "h itting", + "Ġr c", + "Ġexert ed", + "ĠS AP", + "it ent", + "Ġperil ous", + "Ġobsc urity", + "Ġassass inate", + "Ġab original", + "Ġresc uing", + "ĠSh attered", + "lock ing", + "all ion", + "Ch anging", + "ĠHar rington", + "ĠB ord", + "ĠAfgh ans", + "Jam ie", + "aret z", + "ĠAugust us", + "Ġ38 6", + "8 30", + "Ġj og", + "ok ingly", + "Tr igger", + "ĠH OR", + "Stat istics", + "Ġviewers hip", + "Ġadd itives", + "h ur", + "Ġmaxim izing", + "ĠR ove", + "ĠLou ie", + "ĠBuck et", + "ĠCHR IST", + "ou sel", + "Ġstre aks", + "ir ted", + "Ġt ert", + "Ġcolonial ism", + "Ġbur ying", + "y k", + "Cond ition", + "ĠDPR K", + "By Id", + "75 1", + "âĹ ¼", + "Ġwor risome", + "Ġvoc ational", + "sl ice", + "Ġsa ils", + "ĠCorrection al", + "95 4", + "Ġt ul", + "K id", + "l uster", + "Ġfam ilial", + "ĠSp it", + "ĠEp iscopal", + "Specific ally", + "ĠVol cano", + "run s", + "q s", + "Ġve tted", + "Ġcram med", + "t rop", + "here r", + "Thank fully", + "Ġper cussion", + "Ġor anges", + "Ġround up", + "Ġ4 99", + "x ious", + "Char acters", + "ĠZion ism", + "ĠR ao", + "ÃĽ ÃĽ", + "W F", + "Ġunintention al", + "ONE Y", + "Gr ab", + "Com mercial", + "Ġglut amate", + "ĠMcK enna", + "ru ciating", + "ning ton", + "ih u", + "Ch an", + "ĠSw ap", + "Ġleaf lets", + "Ġfunction ally", + "er ous", + "F arm", + "Ġcal oric", + "ĠLiter ally", + "con cert", + "Ġshe nan", + "Ġrep aid", + "ey es", + "Ġbas hing", + "ĠG orge", + "Ġcollabor ations", + "Ġun account", + "itch ie", + "Ġteam work", + "pp elin", + "Ġpip ing", + "Ġmin ced", + "Ġd iam", + "ri eg", + "Ġmasc ara", + "Ġsuck er", + "ĠMo ons", + "App s", + "ĠPe ck", + "Ġper v", + "ĠFl oat", + "o ley", + "ĠN ish", + "im ize", + "Ġarom atic", + "u in", + "end ish", + "! 
/", + "ĠB icycle", + "ĠAS IC", + "ile ged", + "ĠQuad ro", + "ios yn", + "Ġlock out", + "ĠW ink", + "SP EC", + "Attempt s", + "Ġseed ed", + "red o", + "ias is", + "Ġsn ag", + "ãĥķ ãĤ©", + "ãĤ ¶", + "Ġground ing", + "Ġrelie ver", + "Ġfrivol ous", + "ĠG ifts", + "ĠF aces", + "Es pecially", + "Ġmicrobi ome", + "im ag", + "ĠSch l", + "ĠP les", + "ĠBle ach", + "ĠIr win", + "ĠE aton", + "ĠDisc iple", + "Ġmultipl ication", + "Ġcoer ced", + "Ġ4 19", + "st h", + "E vil", + "B omb", + "Ġex orc", + "Ġstag gered", + "L ESS", + "Ġinert ia", + "ĠED IT", + "Ġgo b", + "Tr aditional", + "Ġclass y", + "Lear y", + "ĠP AGE", + "yr s", + "Ġtrans porter", + "Ġmat ured", + "Ġhij ab", + "Ġbi ome", + "Where as", + "Ġex termination", + "ĠT ues", + "ĠT akeru", + "ĠAud rey", + "er ial", + "ĠAd en", + "aff les", + "Ġnarciss istic", + "ĠB aird", + "UT F", + "I re", + "ĠCon nie", + "Ch amp", + "Ġwhis pering", + "ĠH att", + "D K", + "Ġdis infect", + "Ġdeduct ed", + "Ġpart ake", + "Ġdown grade", + "ĠEs ports", + "ĠContin uing", + "Ġdemocr atically", + "icro bial", + "itt a", + "Ġlim estone", + "Ġexempt ed", + "ĠFren zy", + "H erm", + "7 28", + "Ġfled gling", + "Met a", + "765 61", + "69 3", + "% :", + "w ake", + "5 26", + "ĠDis cipline", + "Ġvirgin ity", + "ĠLeg ions", + "ĠFrank ie", + "int ent", + "Ġrest rooms", + "ĠRou ter", + "da q", + "Ġobjection able", + "âĨ ij", + "w ark", + "ĠRah ul", + "g ain", + "activ ation", + "abs olute", + "ĠAccess ed", + "Ġ24 00", + "ogg les", + "Ġsecond ly", + "ĠDEF ENSE", + "Ġpost age", + "wra pper", + "sh arp", + "7 29", + "Ġcommun icates", + "Ġadd on", + "ĠMil itia", + "H ong", + "Ġsl umped", + "ĠJP EG", + "ĠI car", + "ad ish", + "68 1", + "Ġmaj esty", + "ĠWolf gang", + "ĠEl astic", + "u per", + "Ġv iz", + "Ġunconscious ly", + "ĠST D", + "ĠS ass", + "Ġflower ing", + "ĠHel ic", + "ĠDra per", + "ĠAm ateur", + "Ġman ure", + "Ġdis ingen", + "ĠLe i", + "br ing", + "9 49", + "Ġinhib ited", + "Ġhead quartered", + "Ġen igmatic", + "�� �", + "Ġred ress", + "R H", + "Ġratt led", + "Ġd iction", + "l io", + "ĠT BA", + "ĠSN AP", + "C alling", + "Ġfasc ists", + "ĠD ove", + "iew icz", + "0 36", + "Ġco asts", + "ĠR ect", + "Ġ) ]", + "L ot", + "6 29", + "ĠS EM", + "ĠPeters en", + "ĠExpl ain", + "ĠBo ards", + "ĠBe zos", + "ĠJ ournals", + "Ġ20 24", + "p arser", + "Ġmist rust", + "Ġgr ate", + "ĠL ocked", + "bo a", + "S aint", + "g aming", + "Ġvow el", + "in ately", + "bl ow", + "All ah", + "Ġun matched", + "Ġb ordering", + "ĠExp end", + "n r", + "Or acle", + "rou ch", + "Ġcont iguous", + "ac us", + "Ġdist raught", + "58 1", + "Ġanat omical", + "O X", + "ap ixel", + "8 33", + "ĠPL US", + "Ġres usc", + "Ġab iding", + "57 3", + "Ġvac ancies", + "Em ily", + "Ġhyp othal", + "ĠWer ner", + "ĠWe e", + "ĠDJ s", + "5 13", + "Ġwitch craft", + "Ġac upuncture", + "ent ary", + "benef it", + "Product s", + "ĠP SP", + "ĠMP G", + "ĠJ inn", + "ĠJ arrett", + "Ġ4 45", + "ĠIm aging", + "ĠP yth", + "Fin ish", + "Ġte x", + "Ġjuven iles", + "Ġhero ism", + "Ġdoubt less", + "ĠA ki", + "ĠT end", + "ĠPatri arch", + "Ġbit ters", + "ĠTele communications", + "it atively", + "ag na", + "Ġr g", + "ĠS OLD", + "Ġcomp ulsion", + "ĠN asa", + "ĠKath ryn", + "Ġmillion aires", + "Ġintrins ically", + "Ġbolst ered", + "time out", + "fl o", + "Ġtut or", + "p our", + "Stat ement", + "Ġ{ *", + "ĠRud olph", + "ĠKimber ly", + "rog ens", + "adi q", + "] +", + "Ġindign ation", + "Ġfract uring", + "ĠRe leases", + "ĠGr ain", + "pro tein", + "L ago", + "Ġvac ations", + "Ġboot ed", + "ĠTH REE", + "ĠH G", + "oresc ence", + "Ġt f", + "Ġso ar", + "iosyn cr", + 
"Ġgl ances", + "ĠSp oon", + "ĠJ ury", + "ĠCow boy", + "Ġcreat ively", + "Hig her", + "Ġsolic itor", + "Ġhaw k", + "ac io", + "89 6", + "Ġsuperf lu", + "Ġbombs hell", + "ct ure", + "Ġbroker age", + "Ġraid ing", + "Ġf rench", + "Ġang led", + "Trans action", + "ĠGen ocide", + "u pe", + "ĠHait ian", + "57 2", + "! :", + "Ġunwitting ly", + "iter ator", + "sc roll", + "Ġtall ied", + "Ġbi omedical", + "ĠC ARD", + "Ġe uphem", + "Ġbrain storm", + "a quin", + "K o", + "Mic helle", + "ĠR unes", + "ĠBall istic", + "ud ers", + "Ġmod esty", + "ĠiP ads", + "ĠEzek iel", + "Y E", + "Ġstars hip", + "Ġpower fully", + "Ġper l", + "ĠSh ade", + "ĠQu art", + "ĠE EG", + "Ġfisher man", + "OS ED", + "ĠTyp ical", + "df x", + "Ġmes hes", + "Ġet ched", + "worth iness", + "Ġtopp led", + "Ġ3 96", + "or ius", + "We iss", + "Ġmy sql", + "ĠVal halla", + "Ù Ĵ", + "le asing", + "Ġrec omp", + "rap nel", + "S el", + "04 3", + "Ġder ailed", + "ĠGu ides", + "IR T", + "Ġde human", + "ĠBritt any", + "\" ))", + "Ġex claim", + "Ġb alk", + "Ġ8 40", + "CLA IM", + "int el", + "L AB", + "Ġpe gged", + "Ġast roph", + "sm oking", + "Ġrig ging", + "Ġfix ation", + "Ġcat apult", + "ins ide", + "ĠC ascade", + "ĠBolshe vik", + "G aza", + "Dep th", + "Ġloud spe", + "Ġalmond s", + "me yer", + "l eness", + "j en", + "f resh", + "Ġunbeat en", + "ĠSqu id", + "ĠPres umably", + "Tim er", + "B W", + "Ġro sters", + "Ġell ipt", + "ĠHar riet", + "dat abase", + "ĠMut ual", + "ĠComm odore", + "uk ed", + "kn ife", + "ĠCOMM UN", + "h ya", + "Ġmel ts", + "arch ives", + "Ġrat ification", + "Ġmultip lying", + "Ġinter oper", + "Ġasc ert", + "w ings", + "ver ting", + "ĠScorp ion", + "ay e", + "ĠPorts mouth", + "ĠM TA", + "n it", + "iaz ep", + "Ġqu arantine", + "Ġslides how", + "Ġcent imeters", + "Ġsyn opsis", + "Ġsp ate", + "th irst", + "Ġnom inating", + "ĠMel vin", + "Pre view", + "Ġthro b", + "Ġgener ational", + "ĠRad ius", + "rest ling", + "put able", + "aw ar", + "N ECT", + "Ġunlaw fully", + "ĠRevel ations", + "Wik ipedia", + "sur v", + "Ġeye ing", + "ij n", + "ĠF W", + "Ġbr unt", + "Ġinter stellar", + "Ġcl itor", + "ĠCroat ian", + "ĠCh ic", + "ev a", + "ĠDis app", + "ĠA kin", + "iner ies", + "d ust", + "Interest ed", + "Ġgen esis", + "ĠE ucl", + "ö n", + "p icking", + "Ġmut ated", + "Ġdisappro ve", + "ĠHD L", + "Ġ6 25", + "Ì ¶", + "c ancer", + "Ġsqu ats", + "Ġle vers", + "Disc uss", + "= ]", + "D ex", + "ĠVIDE OS", + "A UD", + "Ġtrans act", + "ĠKin ect", + "ĠK uala", + "ĠC yp", + "7 47", + "Ġsh attering", + "Ġarsen ic", + "ĠInt ake", + "ĠAngel o", + "ĠQu it", + "ĠK he", + "Ġ18 93", + "M aker", + "0 29", + "ĠPain ting", + "Dis able", + "9 16", + "Ġanal ges", + "Ġtact ile", + "Ġprop hes", + "Ġd iced", + "ĠTravel s", + "ĠHe ader", + "ĠClub s", + "Ass istant", + "Ġinc rim", + "Ġd ips", + "Ġcruc ifix", + "ĠShan ahan", + "ĠInter pret", + "Ġ40 90", + "al ogy", + "abb a", + "Ġsimul ac", + "hus band", + "S IM", + "Ġrecy cle", + "uc er", + "ed ged", + "Ġre naissance", + "ĠBomb ay", + "Cath olic", + "ĠL INE", + "ĠCl othing", + "re ports", + "Ġpl aus", + "Ġd ag", + "ĠM ace", + "Z I", + "Ġintr uder", + "ĠVeter inary", + "g ru", + "Ġsne aky", + "ĠS ie", + "ĠC innamon", + "P OSE", + "Ġcou rier", + "ĠC NS", + "Ġemanc ipation", + "s it", + "Ġplay through", + "ĠFac ilities", + "v irt", + "ĠG auntlet", + "Thom pson", + "Ġunbeliev ably", + "Param eters", + "Ġst itching", + "ign e", + "ĠTH ESE", + "Priv acy", + "Ġshenan igans", + "Ġvit ri", + "ĠVal id", + "59 1", + "Ń ·", + "ĠProt otype", + "ink a", + "SC P", + "ĠT id", + "è Ī", + "old ed", + "Ġindividual ity", + "Ġbark ing", + 
"Ġm ars", + "ĠW D", + "Ġ8 20", + "Ġt ir", + "Ġsl apping", + "Ġdisgr untled", + "ĠAng ola", + "ri us", + "ĠTorn ado", + "ĠTh urs", + "Ġcapt cha", + "Ġang st", + "ĠP og", + "ĠAssass ins", + "ĠAd idas", + "Ġjoy ful", + "Ġwh ining", + "Emer gency", + "Ġphosph orus", + "Ġatt rition", + "oph on", + "ĠTimber wolves", + "ĠJ ah", + "ĠBr inging", + "ĠW ad", + "ĠEn sure", + "oh l", + "ĠX ie", + "omm el", + "c mp", + "Ġz ipper", + "Ġrel at", + "ĠCor ridor", + "m ilo", + "T ING", + "Av g", + "Ġcro pped", + "] }", + "Ġr aged", + "ĠLump ur", + "ĠGuer rero", + "our ke", + "N ut", + "Ġoff sets", + "og lu", + "dr m", + "Ġmort als", + "lat able", + "Ġdismiss ive", + "ä¸ ī", + "Ġthro ats", + "Ġchips et", + "ĠSpot light", + "Catal og", + "art ist", + "G b", + "Ġch illy", + "Ġst oked", + "Ġ3 74", + "W ard", + "L atin", + "Ġf iasco", + "Ġble ach", + "Ġb rav", + "Enh anced", + "Ġin oc", + "ĠFior ina", + "_ >", + "Ġle ukemia", + "Ġel uc", + "Ġannoun cer", + "ĠLith uan", + "ĠArm ageddon", + "å ĩ", + "Len in", + "ĠR uk", + "Ġpe pp", + "ĠRom antic", + "ĠP IT", + "ĠInter stellar", + "ĠAt kinson", + "R aid", + "J s", + "Go al", + "C ourse", + "Ġvan ishing", + "es ley", + "ĠR ounds", + "Els a", + "59 3", + "Ġredund ancy", + "ĠST AND", + "Ġprop hetic", + "Ġhabit able", + "ry u", + "Ġfaint ly", + "M ODE", + "Ġfl anked", + "IR C", + "Aw esome", + "Ġsp urious", + "ĠZ ah", + "ĠMS G", + "Ġsh ading", + "Ġmotiv ational", + "ĠSant ana", + "ĠS PR", + "Ġexc ruciating", + "om ial", + "ĠM iko", + "ĠLe opard", + "A byss", + "Ġ[ |", + "d irty", + "Ġbath s", + "Ġdem oral", + "and re", + "P B", + "Ġun ification", + "Ġsac rament", + "Ġ[ &", + "Ġpric eless", + "Ġgel atin", + "Ġeman ating", + "ĠAll aah", + "98 6", + "Ġout burst", + "Ġer as", + "ĠX VI", + "ĠSP I", + "O tt", + "ĠLaz arus", + "PL IED", + "F lying", + "blog s", + "W isconsin", + "R aven", + "Ġreb ate", + "Ġcreep s", + "ĠSp an", + "ĠPain ter", + "ĠKir a", + "ĠAm os", + "ĠCor vette", + "Cons umer", + "ĠRec over", + "ck i", + "Ġpes ky", + "ĠIn vention", + "Compan ies", + "Ġchalleng ers", + "ad emic", + "ĠUkrain ians", + "ĠNeuro log", + "ĠFors aken", + "Ġent rants", + "Ġemb attled", + "Ġdef unct", + "ĠGlac ier", + "Ġpo isons", + "ĠH orses", + "m akes", + "ĠD irt", + "Ġ4 23", + "hh h", + "ĠTrans formation", + "QUI RE", + "................ 
..", + "Ġtrave ller", + "ĠSe xy", + "ĠK ern", + "ip olar", + "Ġransom ware", + "oooooooo oooooooo", + "E c", + "rub y", + "Prof essional", + "ĠOut break", + "arg ument", + "G rey", + "ĠFif a", + "ĠCH O", + "ĠFOR M", + "ĠAm trak", + "- [", + "Ġcr adle", + "Ġantioxid ants", + "ãģ®å ®", + "7 36", + "ĠNAS L", + "ĠContribut ions", + "Ind iana", + "ĠST EP", + "C SS", + "Ġsal ient", + "Ġall ocations", + "yr ights", + "Ġm ashed", + "ĠCut ter", + "Sex ual", + "Ġp ounded", + "Ġfan base", + "Ġc asc", + "ĠTrans parency", + "Ġanaly tic", + "ĠSummon er", + "× ŀ", + "ĠAD C", + "det ail", + "Ġvan quished", + "Ġcr abs", + "ar ie", + "Dest roy", + "ĠS ack", + "Ġtrans istor", + "Al abama", + "ĠK oen", + "ĠFisher ies", + "c one", + "Ġannex ed", + "ĠM GM", + "es a", + "Ġf aked", + "ĠCong ratulations", + "Ġhind ered", + "Ġcorrection al", + "ĠI TV", + "lee ve", + "Ġin appropriately", + "lic ks", + "Ġtresp ass", + "Ġp aws", + "Ġnegoti ator", + "ĠChrist ensen", + "lim its", + "ĠDian ne", + "Ġeleg ance", + "ĠContract s", + "an ke", + "Ob j", + "Ġvigil ance", + "Ġcast les", + "ĠN AD", + "ĠHol o", + "Ġemph atically", + "ĠTit us", + "ĠServ ing", + "ĠRich ie", + "ĠP igs", + "5 68", + "Ġanim osity", + "ĠAtt ributes", + "ĠU riel", + "M Q", + "my ra", + "ĠApplic ant", + "Ġpsychiat rists", + "ĠV ij", + "ĠAb by", + "ag ree", + "P ush", + "Ġk Wh", + "hib a", + "Ġinc ite", + "ĠWe asley", + "ĠTax i", + "minist ic", + "hy per", + "ĠF arn", + "Ġ6 01", + "ĠNation wide", + "F ake", + "95 2", + "Ġma ize", + "Ġinteract ed", + "Ġtransition ed", + "Ġparas itic", + "Ġharm onic", + "Ġdec aying", + "Ġbas eless", + "ns ics", + "Ġtrans pired", + "Ġabund antly", + "ĠFore nsic", + "Ġtread mill", + "ĠJ av", + "ab and", + "Ġssh d", + "Ġfront man", + "ĠJak arta", + "oll er", + "dro ps", + "ĠSERV ICES", + "rompt u", + "oph ical", + "h ospital", + "bled on", + "6 45", + "Ġmid range", + "ĠEV ENT", + "cul ated", + "raw led", + "Ġper ched", + "Ġover board", + "ĠPe el", + "ĠP wr", + "ĠCar th", + "ĠCOM PLE", + "co e", + "sh all", + "Ġdeter rence", + "M ETHOD", + "ĠAbs ent", + "M EN", + "Ġs ill", + "ĠLE VEL", + "Y ork", + "Ġsin ners", + "ĠOP EC", + "ĠN ur", + "ĠDesign s", + "se lection", + "Ġunw orthy", + "CH A", + "Ġstreng thens", + "88 3", + "ed ly", + "Ġslic ing", + "Ġmal nutrition", + "Ġfilm making", + "ĠPol k", + "ur ated", + "Ġ4 21", + "bre akers", + "!' 
\"", + "Ġwet lands", + "ĠDisc rimination", + "Ġallow able", + "Ġste ered", + "ĠSic ily", + "S AM", + "Ġmust ache", + "Ġm ids", + "Ġcl ipped", + "Ġcirc ulate", + "Ġbr ittle", + "ĠBuild ings", + "ra ised", + "ĠRound up", + "Ġwealth ier", + "Ġoverw rite", + "Ġover powered", + "ĠGerr ard", + "s ites", + "PD ATED", + "Ġacute ly", + "ĠGam ble", + "Ġp im", + "ĠK us", + "Typ ically", + "De ploy", + "ĠMoroc can", + "p otion", + "com be", + "Ġvigil ante", + "Ġ36 3", + "St ew", + "ĠB agg", + "Ġres ided", + "ĠSp o", + "Ġrem nant", + "Ġempt iness", + "br ainer", + "Ġout patient", + "pri ority", + "Ġle ptin", + "ĠPay ton", + "ĠGle aming", + "ĠS hed", + "ĠPol o", + "ĠMormon ism", + "rest ricted", + "arl ane", + "w x", + "Ġcreat ine", + "ĠAn on", + "ĠST UD", + "ĠJ UL", + "ĠT ee", + "5 28", + "08 9", + "Ġhat ched", + "Dis patch", + "ĠCompos ite", + "Ġ45 1", + "p uff", + "ĠX COM", + "ĠOr n", + "ĠTH ANK", + "END ED", + "ĠAshe ville", + "Ġà ľ", + "Ġman go", + "ĠS lightly", + "world ly", + "ĠW ander", + "ĠExp and", + "ĠCh r", + "M ist", + "Ġorthodox y", + "ĠUN ESCO", + "reg ate", + "Else where", + "k ie", + "ir led", + "Ġtopp le", + "Ġadopt ive", + "ĠLeg s", + "d ress", + "ĠS agan", + "b are", + "ĠGl ou", + "Cr unch", + "Ġhelp ers", + "Ġchron ically", + "ĠH uma", + "1 0000", + "Ġaccommod ating", + "äº Ķ", + "Ġwrink les", + "Ġdod ged", + "four th", + "Ġpre con", + "Ġcompress or", + "ĠK are", + "Ġev ict", + "ĠWar wick", + "im ar", + "Ġmodern ization", + "Ġband wagon", + "Ġref uted", + "Ġnet ted", + "ĠNa ples", + "ĠGen ie", + "per ors", + "Ġfield ed", + "Ġde re", + "ĠPar ables", + "le es", + "Ġtr out", + "asp ers", + "Ġn ihil", + "Ġhapp iest", + "Ġflo ppy", + "ĠLo ft", + "ĠHe ard", + "Ġun ison", + "Ġl ug", + "ĠRed mond", + "class ic", + "Supp orters", + "SH IP", + "G MT", + "Ġfue lled", + "ç IJ", + "Ġd d", + "ĠEmin em", + "Ġ18 97", + "NY SE", + "Ġsecret aries", + "ĠF IA", + "ĠCanaver al", + "F avorite", + "Ġp omp", + "Ġdetain ee", + "ers hip", + "aim on", + "i our", + "ĠA pex", + "Ġplant ations", + "am ia", + "ac ion", + "R ust", + "Ġtow ed", + "ĠTru ly", + "5 77", + "Ġshel tered", + "r ider", + "W o", + "Ġl air", + "ĠInt elligent", + "impro ve", + "m atically", + "Ġet iquette", + "ad ra", + "all o", + "ĠJun o", + "any thing", + "ĠStru ggle", + "ĠPred ict", + "ĠGr imes", + "ĠAMER ICA", + "ct x", + "ĠSit uation", + "W OOD", + "Ġsol uble", + "me ier", + "Ġintoler able", + "ang ering", + "Ġun interrupted", + "Ġtool tip", + "Ġinterrog ated", + "Ġgun ned", + "ĠSne ak", + "æŃ ¦", + "Ġt ether", + "Ġcr umble", + "L ens", + "Ġclust ered", + "ĠSy l", + "ĠHas an", + "Ġdystop ian", + "w ana", + "Ġjoy stick", + "ĠTh ib", + "amm u", + "Tom orrow", + "5 46", + "Ġoverc ame", + "Ġminim ized", + "cept or", + "Run ner", + "ENG TH", + "ĠBrend a", + "ĠAchieve ments", + "Ġtor ches", + "Ġrapp ort", + "ĠInvestig ator", + "ĠHand ling", + "rel ation", + "g rey", + "8 15", + "Ġk cal", + "ĠComm ands", + "d q", + "Ġcur ls", + "Ġbe arer", + "Ġcyn icism", + "it ri", + "ĠUse ful", + "B ee", + "D CS", + "Ġab ras", + "P ract", + "BIL ITIES", + "7 12", + "Ġdebug ger", + "Ġdebt or", + "ĠL ia", + "ĠK ers", + "Ġexacerb ate", + "ĠSt acy", + "ĠB land", + "ĠSc enes", + "Ġbranch ing", + "âĸĪâĸĪâĸĪâĸĪ âĸĪâĸĪâĸĪâĸĪ", + "ape ake", + "Ġs alsa", + "Ġmish and", + "ĠKon ami", + "ĠN ib", + "Ġanecd ote", + "Ġagree able", + "Ï ī", + "ĠNath aniel", + "ĠHe isman", + "ĠB eware", + "Ġ18 86", + "spect ive", + "69 1", + "5 22", + "Ġinhib its", + "Ġhas hing", + "Ġ18 89", + "å° Ĩ", + "v ich", + "P ure", + "Ġsolid ly", + "Ġaspir in", + "im aru", + "Ġstreet car", + "ĠU CS", 
+ "ĠJ udd", + "Ġflash backs", + "p ins", + "Ġ14 40", + "ĠUN HCR", + "ĠSym ptoms", + "T IT", + "5 38", + "F ra", + "% );", + "Ġo oz", + "Ġcur few", + "Ġcal med", + "Ġparticip ates", + "Te X", + "Ġnons ensical", + "Ġfull back", + "ĠDe L", + "mon key", + "h ari", + "Ġmetabol ites", + "Ġloot ed", + "ĠAL WAYS", + "ĠB CC", + "L t", + "oc het", + "B one", + "Ġveto ed", + "Ġg cc", + "ĠCL ICK", + "Ġ18 88", + "s af", + "Ġstiff ness", + "Ġlow ly", + "ĠGe h", + "vers on", + "ors et", + "Ġun foreseen", + "Ġan esthesia", + "ĠOpt ical", + "Ġrecon structed", + "ĠT up", + "sh ows", + "NEW S", + "ĠNewsp aper", + "ĠA SA", + "ter a", + "N umbers", + "Ġinexpl icable", + "× ij", + "Ġhard ness", + "unt arily", + "ĠA cer", + "grad ient", + "ARD IS", + "Ġwood land", + "Ġmetaph ors", + "ĠWem bley", + "ĠPa vel", + "phil is", + "Ġre writing", + "Ġpercept ual", + "Ġ10 70", + "worm s", + "ĠDown s", + "Ġunsur prisingly", + "Ġtag ging", + "fl ame", + "Ġlit res", + "Ġboun ces", + "ĠB abe", + "sh ut", + "Ġoverd oses", + "ĠShe ila", + "ĠCh au", + "ĠBl ess", + "Capt ure", + "ĠSign ificant", + "ĠSc ion", + "Ġ38 9", + "ĠMc H", + "ĠTitan ium", + "ĠMe al", + "amed a", + "ag ents", + "agg ressive", + "B illy", + "76 3", + "ĠS aying", + "DER R", + "it one", + "Coll ins", + "B ound", + "Ġbol ted", + "ĠDM CA", + "95 3", + "Ġun iqueness", + "Ġep igen", + "un ci", + "ant am", + "Ġreck oning", + "ch airs", + "OG R", + "ĠSen egal", + "Ġ18 62", + "re levant", + "Ġ ¯", + "Ġpharm acies", + "ĠG eral", + "v ier", + "Y an", + "OR PG", + "Ġrab id", + "b ending", + "ĠUN ITED", + "Ġ4 65", + "As sembly", + "Ġwe ep", + "Ġbe hest", + "ĠMother s", + "ĠJ ace", + "h id", + "Ġwh irlwind", + "ĠUN IVERS", + "Ġut opian", + "Ġkidn ap", + "Ph ilipp", + "K in", + "89 3", + "Ġlivest ream", + "ĠM ISS", + "Ġsub versive", + "ĠTechn iques", + "ĠJUST ICE", + "ĠB ASE", + "Ġ38 7", + "Ġassail ants", + "ĠHard core", + "Ġsprink led", + "ĠP se", + "é ļ", + "print ed", + "ĠH au", + "OR GE", + "ĠT OUR", + "Ġl aced", + "Ġit ch", + "G iving", + "Ġport ed", + "78 1", + "//////////////// ////////////////", + "bre eding", + "Ġlog ger", + "ĠH OL", + "inn ie", + "First ly", + "Ġembry onic", + "Ġdeleg ated", + "p ai", + "O IL", + "Ġcentr ally", + "ĠR x", + "ĠSc outing", + "D utch", + "Ġhe reditary", + "ĠCru iser", + "s at", + "5 29", + "ĠMar riott", + "other mal", + "Ġprohib itions", + "E arn", + "ĠSt ab", + "ĠColleg es", + "ĠBel ief", + "st retched", + "ĠL H", + "ĠEntity Item", + "C IA", + "Ġun rem", + "Ġlaure ate", + "Ġdenomin ations", + "sum mary", + "h ler", + "S pect", + "ĠK laus", + "ĠBe ans", + "Ġins ur", + "ĠPA X", + "Ġfield er", + "ĠV et", + "ĠSp arrow", + "z ie", + "ĠS Q", + "ĠMond ays", + "ĠOff line", + "ĠLer ner", + "ĠExt ensions", + "Ire land", + "Ġpatron age", + "Ġcontrast ed", + "ĠMan ia", + "h irt", + "Mos cow", + "Ġcondem ns", + "ĠAn ge", + "Ġcomp osing", + "ĠPe pe", + "ĠP addock", + "Ġheter ogeneity", + "Ġide ologically", + "Ġf ishes", + "Ġcur sing", + "ĠR utherford", + "ĠFlo ating", + "ĠAm elia", + "Te a", + "Syn opsis", + "Ġstun ts", + "Ġbe ad", + "Ġstock ing", + "ĠM ILL", + "ob ook", + "mass ive", + "\\ <", + "Ġh ump", + "ĠPref erences", + "Engine Debug", + "ge ist", + "ĠNiet o", + "ome ver", + "ish y", + "eval uate", + "col onial", + "Altern ative", + "ĠGo Pro", + "ĠV ortex", + "ĠNET WORK", + "ans ky", + "Sec ure", + "ĠTh rust", + "Sn ake", + "Ġparcel s", + "Ġsam urai", + "Ġactress es", + "N ap", + "M F", + "ifer ation", + "Be er", + "5 23", + "ĠI ly", + "oint ment", + "P ing", + "Ġstri ped", + "ĠMell on", + "oss ession", + "Ġneut ron", + "end ium", + "Ġa 
ph", + "ĠFlav oring", + "Ġ38 3", + "Ġrespons iveness", + "ĠJ indal", + "ĠHitch cock", + "Den ver", + "ĠDRAG ON", + "sm anship", + "ĠDu pl", + "Ġs ly", + "Ġweb cam", + "ĠTw ain", + "ĠDar ling", + "ili ate", + "cons umer", + "D IT", + "Ġnames ake", + "Ġun orthodox", + "Ġfun er", + "ĠPL oS", + "ĠCONTR OL", + "ozy g", + "ogl obin", + "F ACE", + "ER G", + "ĠD ia", + "ĠF iesta", + "ce le", + "0 34", + "Ġencl ave", + "âĸ¬ âĸ¬", + "on ement", + "al ist", + "M and", + "Ġhome grown", + "ĠF ancy", + "Ġconcept ions", + "ĠCont ains", + "ure en", + "Ġreiter ate", + "Ġme ager", + "Ġinstall ments", + "Sp awn", + "6 27", + "Ġphot oc", + "ĠCab rera", + "ĠRos enthal", + "ĠLans ing", + "is ner", + "Ġinvest s", + "ĠUFO s", + "EX P", + "Hard ware", + "Ġtr agically", + "Ġconced es", + "ie ft", + "ch am", + "bor gh", + "ĠSch r", + "ĠMel anie", + "ĠH oy", + "Ġvisit ation", + "Ġid iosyncr", + "Ġfract ions", + "Ġfore skin", + "ob os", + "Ġpo aching", + "ĠVI EW", + "Ġstimul ates", + "ĠG ork", + "can on", + "M IC", + "ĠNem esis", + "ĠInd ra", + "ĠDM V", + "Ġ5 29", + "Ġinspect ing", + "Ġgrand ma", + "ĠW hedon", + "ĠSh ant", + "ĠP urg", + "ik an", + "ĠT eg", + "ĠCL R", + "z ac", + "Vict oria", + "ĠVer ify", + "ion ics", + "Ġpart ying", + "ĠM ou", + "col our", + "Ġtestim onies", + "l ations", + "Ġpress uring", + "hi ro", + "ac ers", + "Ġf id", + "ang ler", + "ĠCS I", + "Ġhere after", + "Ġdiss idents", + "report ing", + "iph any", + "che v", + "Ġsol itude", + "Ġl obe", + "Ġind is", + "Ġcred ential", + "re cent", + "ad ult", + "ĠNir vana", + "ĠFranch ise", + "L ayer", + "H yp", + "ĠBerks hire", + "Ġwill s", + "t if", + "Ġtot em", + "ĠJud ah", + "rep air", + "Inst ant", + "5 48", + "Ġemb assies", + "Ġbott leneck", + "Ġb ount", + "Ġtyp ew", + "ĠAl vin", + "j ing", + "im ilar", + "R ush", + "Ġbr im", + "ĠHEL P", + "A im", + "] '", + "Ġpass ively", + "Ġbound ed", + "ĠR ated", + "Ġcriminal ity", + "Ġbiom ark", + "Ġdisp atcher", + "ĠTow ards", + "Ġ+ ++", + "right eous", + "f rog", + "ĠP anc", + "C arter", + "0 32", + "æ© Ł", + "Ġult raviolet", + "ĠLic ensed", + "ĠT ata", + "ĠBl essing", + "ĠG AM", + "Ġchem ically", + "ĠSe af", + "ĠRE LE", + "ĠMerc enary", + "capital ist", + "Ġform ulations", + "Ġann ihilation", + "ĠVer b", + "ĠAr gon", + "Ġun loaded", + "Ġmorp hed", + "Ġconqu ering", + "back er", + "I ELD", + "Ġtheft s", + "Ġfront runner", + "ĠRoy ale", + "ĠFund amental", + "el ight", + "C hip", + "necess ary", + "ay n", + "ĠSl ip", + "Ġ4 48", + "cern ed", + "P ause", + "Ġshock ingly", + "ĠAB V", + "Ġcomp osure", + "7 33", + "ĠMotors port", + "ah ime", + "Mur ray", + "M ach", + "Ġgr ids", + "Ġdeb ian", + "Ġfurther more", + "Ġdexter ity", + "ĠCollect ions", + "os lov", + "il age", + "b j", + "ĠMont eneg", + "Ġstrut Connector", + "Ġmassac res", + "Ġbrief s", + "fet ched", + "uv ian", + "ol ition", + "Fail ure", + "emon ic", + "Ġfl ared", + "Ġclaim ant", + "Ġc ures", + "Ġgive aways", + "ĠSubst ance", + "al ions", + "Ġcr inge", + "ĠK ul", + "Ġarist ocracy", + "ĠUl ster", + "ol ated", + "h ousing", + "ĠM IS", + "Ġgl ared", + "ĠWil helm", + "ne eds", + "lam bda", + "build ers", + "ĠV IS", + "Ġradi ator", + "ĠGhost busters", + "Ġ4 36", + "act ual", + "Ġher ds", + "ç a", + "watch ing", + "Ġcounter ing", + "Ch arge", + "Ġchar red", + "Ġwar heads", + "Ġiod ine", + "ĠM acy", + "04 1", + "Ġdepart ures", + "ĠS ins", + "Ġdy ed", + "ĠConcept s", + "g ado", + "7 13", + "Ġquot ations", + "Ġg ist", + "ĠChrist y", + "Ġant igen", + "ĠHem p", + "ĠD rawn", + "ĠB arg", + "ez vous", + "Ġp aternity", + "Ġar du", + "ĠAnch orage", + "ĠR ik", + "Ġover 
loaded", + "ĠUs ername", + "ĠTam my", + "ĠN au", + "ĠCell ular", + "Ġw aning", + "Ġrod ent", + "ĠWor cester", + "il ts", + "ĠT ad", + "Ġdwell ings", + "Ġbull ish", + "4 31", + "Ġretali ate", + "Ġmig raine", + "ĠChev ron", + "CH ECK", + "Ġdon key", + "c rim", + "SP A", + "ĠAn alog", + "Ġmarqu ee", + "ĠHa as", + "B ir", + "ĠGD DR", + "ĠDownload s", + "Ġwill power", + "ĠFor th", + "ĠRecord ed", + "Ġimp ossibility", + "ĠLog ged", + "ĠFr anks", + "ĠR att", + "in itions", + "Ġclean ers", + "Ġsore ly", + "Ġflick ering", + "ĠEx amination", + "c atching", + "allow een", + "Ms g", + "Ġdun no", + "F a", + "Ġdys ph", + "c razy", + ".' '.", + "Ġmain line", + "Ġc s", + "Ġp tr", + "ĠW ally", + "ig un", + "95 1", + "ĠBig foot", + "f ights", + "Ġretrie ving", + "J r", + "Ġdupl ication", + "ĠExpl an", + "Ġrel ational", + "Ġqu aint", + "Ġbisc uits", + "Ġad o", + "Ġsh udder", + "Ġantid ote", + "blood ed", + "ks h", + "Ġsa uces", + "Ġrein vest", + "Ġdispens ary", + "ĠD iver", + "Ġ9 000", + "stud ent", + "Ġin separ", + "esc ap", + "Ġtodd lers", + "ĠGP IO", + "ĠAss ignment", + "head ers", + "Ġlack luster", + "Ġab ack", + "95 6", + "Ġtool bar", + "7 45", + "Ġo ust", + "Ġcontempl ation", + "ĠPRES IDENT", + "Ġ4 58", + "==== ==", + "Ġguarantee ing", + "ĠHe ist", + "ĠCann es", + "Ļ ½", + "Ġcollabor ator", + "ĠAm p", + "Ġg ou", + "ĠSH ALL", + "st ories", + "78 3", + "Ġmobil ized", + "Ġbro od", + "ĠL U", + "ĠðŁ ij", + "Ġref in", + "ĠAnthrop ology", + "v ind", + "ill i", + "Ġwarrant ies", + "ĠB abel", + "Ġsw ath", + "Ġc aches", + "Ġantagon ists", + "art ifacts", + "Ġhot ly", + "ĠSt arts", + "ĠG ö", + "z ag", + "!! !!!", + "Ġsc ourge", + "Ġcons piring", + "ru its", + "re verse", + "ĠShe en", + "ĠJes uit", + "ĠGiov anni", + "ad ies", + "Ġbutt ocks", + "ear cher", + "ac an", + "Ġvolley ball", + "Ġshroud ed", + "Ġscore board", + "b ats", + "ĠI PM", + "Ġass es", + "Ġde regulation", + "ĠTe legram", + "ĠReb oot", + "Ġ7 000", + "ĠCan ary", + "Ġk ernels", + "ĠFranç ois", + "ĠD uff", + "ĠP on", + "ĠLe ica", + "ĠGar min", + "Ġor phans", + "ĠClaud ia", + "Ġcal endars", + "ĠLe ilan", + "ent o", + "R ocket", + "Ġbr unch", + "ĠHaw king", + "ain ers", + "Ġsens ibilities", + "Ġk W", + "ĠK and", + "Ġre claimed", + "Ġinteresting ly", + "× ©", + "rom y", + "J M", + "ĠEnhance ment", + "b ush", + "Sk ip", + "Ġrapp ers", + "Ġg azing", + "p edia", + "ath lon", + "Rev olution", + "Ġsn ipers", + "Ġre verted", + "Ġconglomer ate", + "T erry", + "79 4", + "Ġhars her", + "Ġdes olate", + "ĠHit man", + "Comm ission", + "Ġ( /", + "âĢ¦ .\"", + "Com par", + "Ġampl ification", + "om inated", + "Ġreg ress", + "ĠColl ider", + "Ġinform ants", + "Ġg azed" + ] + } +} \ No newline at end of file diff --git a/phi-2/tokenizer_config.json b/phi-2/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..5145e0895f2fe7f1ccb3eb9da69ec74ec9c680db --- /dev/null +++ b/phi-2/tokenizer_config.json @@ -0,0 +1,323 @@ +{ + "add_prefix_space": false, + "added_tokens_decoder": { + "50256": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "50257": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50258": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50259": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + 
"50260": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50261": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50262": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50263": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50264": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50265": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50266": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50267": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50268": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50269": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50270": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50271": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50272": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50273": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50274": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50275": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50276": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50277": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50278": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50279": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50280": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50281": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50282": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50283": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50284": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50285": { + "content": " ", + "lstrip": false, + "normalized": true, + 
"rstrip": false, + "single_word": false, + "special": false + }, + "50286": { + "content": " ", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50287": { + "content": "\t\t\t\t\t\t\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50288": { + "content": "\t\t\t\t\t\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50289": { + "content": "\t\t\t\t\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50290": { + "content": "\t\t\t\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50291": { + "content": "\t\t\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50292": { + "content": "\t\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50293": { + "content": "\t\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + }, + "50294": { + "content": "\t\t", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": false + } + }, + "bos_token": "<|endoftext|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|endoftext|>", + "model_max_length": 2048, + "tokenizer_class": "CodeGenTokenizer", + "unk_token": "<|endoftext|>" +} diff --git a/phi-2/vocab.json b/phi-2/vocab.json new file mode 100644 index 0000000000000000000000000000000000000000..84ef7fb594b5c0979e48bdeddb60a0adef33df0b --- /dev/null +++ b/phi-2/vocab.json @@ -0,0 +1 @@ 
+{"!":0,"\"":1,"#":2,"$":3,"%":4,"&":5,"'":6,"(":7,")":8,"*":9,"+":10,",":11,"-":12,".":13,"/":14,"0":15,"1":16,"2":17,"3":18,"4":19,"5":20,"6":21,"7":22,"8":23,"9":24,":":25,";":26,"<":27,"=":28,">":29,"?":30,"@":31,"A":32,"B":33,"C":34,"D":35,"E":36,"F":37,"G":38,"H":39,"I":40,"J":41,"K":42,"L":43,"M":44,"N":45,"O":46,"P":47,"Q":48,"R":49,"S":50,"T":51,"U":52,"V":53,"W":54,"X":55,"Y":56,"Z":57,"[":58,"\\":59,"]":60,"^":61,"_":62,"`":63,"a":64,"b":65,"c":66,"d":67,"e":68,"f":69,"g":70,"h":71,"i":72,"j":73,"k":74,"l":75,"m":76,"n":77,"o":78,"p":79,"q":80,"r":81,"s":82,"t":83,"u":84,"v":85,"w":86,"x":87,"y":88,"z":89,"{":90,"|":91,"}":92,"~":93,"¡":94,"¢":95,"£":96,"¤":97,"¥":98,"¦":99,"§":100,"¨":101,"©":102,"ª":103,"«":104,"¬":105,"®":106,"¯":107,"°":108,"±":109,"²":110,"³":111,"´":112,"µ":113,"¶":114,"·":115,"¸":116,"¹":117,"º":118,"»":119,"¼":120,"½":121,"¾":122,"¿":123,"À":124,"Á":125,"Â":126,"Ã":127,"Ä":128,"Å":129,"Æ":130,"Ç":131,"È":132,"É":133,"Ê":134,"Ë":135,"Ì":136,"Í":137,"Î":138,"Ï":139,"Ð":140,"Ñ":141,"Ò":142,"Ó":143,"Ô":144,"Õ":145,"Ö":146,"×":147,"Ø":148,"Ù":149,"Ú":150,"Û":151,"Ü":152,"Ý":153,"Þ":154,"ß":155,"à":156,"á":157,"â":158,"ã":159,"ä":160,"å":161,"æ":162,"ç":163,"è":164,"é":165,"ê":166,"ë":167,"ì":168,"í":169,"î":170,"ï":171,"ð":172,"ñ":173,"ò":174,"ó":175,"ô":176,"õ":177,"ö":178,"÷":179,"ø":180,"ù":181,"ú":182,"û":183,"ü":184,"ý":185,"þ":186,"ÿ":187,"Ā":188,"ā":189,"Ă":190,"ă":191,"Ą":192,"ą":193,"Ć":194,"ć":195,"Ĉ":196,"ĉ":197,"Ċ":198,"ċ":199,"Č":200,"č":201,"Ď":202,"ď":203,"Đ":204,"đ":205,"Ē":206,"ē":207,"Ĕ":208,"ĕ":209,"Ė":210,"ė":211,"Ę":212,"ę":213,"Ě":214,"ě":215,"Ĝ":216,"ĝ":217,"Ğ":218,"ğ":219,"Ġ":220,"ġ":221,"Ģ":222,"ģ":223,"Ĥ":224,"ĥ":225,"Ħ":226,"ħ":227,"Ĩ":228,"ĩ":229,"Ī":230,"ī":231,"Ĭ":232,"ĭ":233,"Į":234,"į":235,"İ":236,"ı":237,"IJ":238,"ij":239,"Ĵ":240,"ĵ":241,"Ķ":242,"ķ":243,"ĸ":244,"Ĺ":245,"ĺ":246,"Ļ":247,"ļ":248,"Ľ":249,"ľ":250,"Ŀ":251,"ŀ":252,"Ł":253,"ł":254,"Ń":255,"Ġt":256,"Ġa":257,"he":258,"in":259,"re":260,"on":261,"Ġthe":262,"er":263,"Ġs":264,"at":265,"Ġw":266,"Ġo":267,"en":268,"Ġc":269,"it":270,"is":271,"an":272,"or":273,"es":274,"Ġb":275,"ed":276,"Ġf":277,"ing":278,"Ġp":279,"ou":280,"Ġan":281,"al":282,"ar":283,"Ġto":284,"Ġm":285,"Ġof":286,"Ġin":287,"Ġd":288,"Ġh":289,"Ġand":290,"ic":291,"as":292,"le":293,"Ġth":294,"ion":295,"om":296,"ll":297,"ent":298,"Ġn":299,"Ġl":300,"st":301,"Ġre":302,"ve":303,"Ġe":304,"ro":305,"ly":306,"Ġbe":307,"Ġg":308,"ĠT":309,"ct":310,"ĠS":311,"id":312,"ot":313,"ĠI":314,"ut":315,"et":316,"ĠA":317,"Ġis":318,"Ġon":319,"im":320,"am":321,"ow":322,"ay":323,"ad":324,"se":325,"Ġthat":326,"ĠC":327,"ig":328,"Ġfor":329,"ac":330,"Ġy":331,"ver":332,"ur":333,"Ġu":334,"ld":335,"Ġst":336,"ĠM":337,"'s":338,"Ġhe":339,"Ġit":340,"ation":341,"ith":342,"ir":343,"ce":344,"Ġyou":345,"il":346,"ĠB":347,"Ġwh":348,"ol":349,"ĠP":350,"Ġwith":351,"Ġ1":352,"ter":353,"ch":354,"Ġas":355,"Ġwe":356,"Ġ(":357,"nd":358,"ill":359,"ĠD":360,"if":361,"Ġ2":362,"ag":363,"ers":364,"ke":365,"Ġ\"":366,"ĠH":367,"em":368,"Ġcon":369,"ĠW":370,"ĠR":371,"her":372,"Ġwas":373,"Ġr":374,"od":375,"ĠF":376,"ul":377,"ate":378,"Ġat":379,"ri":380,"pp":381,"ore":382,"ĠThe":383,"Ġse":384,"us":385,"Ġpro":386,"Ġha":387,"um":388,"Ġare":389,"Ġde":390,"ain":391,"and":392,"Ġor":393,"igh":394,"est":395,"ist":396,"ab":397,"rom":398,"ĠN":399,"th":400,"Ġcom":401,"ĠG":402,"un":403,"op":404,"00":405,"ĠL":406,"Ġnot":407,"ess":408,"Ġex":409,"Ġv":410,"res":411,"ĠE":412,"ew":413,"ity":414,"ant":415,"Ġby":416,"el":417,"os":418,"ort":419,"oc":420,"qu":421,"Ġfrom":422,"Ġhave":423,"Ġsu":424,"ive":4
25,"ould":426,"Ġsh":427,"Ġthis":428,"nt":429,"ra":430,"pe":431,"ight":432,"art":433,"ment":434,"Ġal":435,"ust":436,"end":437,"--":438,"all":439,"ĠO":440,"ack":441,"Ġch":442,"Ġle":443,"ies":444,"red":445,"ard":446,"âĢ":447,"out":448,"ĠJ":449,"Ġab":450,"ear":451,"iv":452,"ally":453,"our":454,"ost":455,"gh":456,"pt":457,"Ġpl":458,"ast":459,"Ġcan":460,"ak":461,"ome":462,"ud":463,"The":464,"Ġhis":465,"Ġdo":466,"Ġgo":467,"Ġhas":468,"ge":469,"'t":470,"ĠU":471,"rou":472,"Ġsa":473,"Ġj":474,"Ġbut":475,"Ġwor":476,"Ġall":477,"ect":478,"Ġk":479,"ame":480,"Ġwill":481,"ok":482,"Ġwhe":483,"Ġthey":484,"ide":485,"01":486,"ff":487,"ich":488,"pl":489,"ther":490,"Ġtr":491,"..":492,"Ġint":493,"ie":494,"ure":495,"age":496,"Ġne":497,"ial":498,"ap":499,"ine":500,"ice":501,"Ġme":502,"Ġout":503,"ans":504,"one":505,"ong":506,"ions":507,"Ġwho":508,"ĠK":509,"Ġup":510,"Ġtheir":511,"Ġad":512,"Ġ3":513,"Ġus":514,"ated":515,"ous":516,"Ġmore":517,"ue":518,"og":519,"ĠSt":520,"ind":521,"ike":522,"Ġso":523,"ime":524,"per":525,".\"":526,"ber":527,"iz":528,"act":529,"Ġone":530,"Ġsaid":531,"Ġ-":532,"are":533,"Ġyour":534,"cc":535,"ĠTh":536,"Ġcl":537,"ep":538,"ake":539,"able":540,"ip":541,"Ġcont":542,"Ġwhich":543,"ia":544,"Ġim":545,"Ġabout":546,"Ġwere":547,"very":548,"ub":549,"Ġhad":550,"Ġen":551,"Ġcomp":552,",\"":553,"ĠIn":554,"Ġun":555,"Ġag":556,"ire":557,"ace":558,"au":559,"ary":560,"Ġwould":561,"ass":562,"ry":563,"ĠâĢ":564,"cl":565,"ook":566,"ere":567,"so":568,"ĠV":569,"ign":570,"ib":571,"Ġoff":572,"Ġte":573,"ven":574,"ĠY":575,"ile":576,"ose":577,"ite":578,"orm":579,"Ġ201":580,"Ġres":581,"Ġman":582,"Ġper":583,"Ġother":584,"ord":585,"ult":586,"Ġbeen":587,"Ġlike":588,"ase":589,"ance":590,"ks":591,"ays":592,"own":593,"ence":594,"Ġdis":595,"ction":596,"Ġany":597,"Ġapp":598,"Ġsp":599,"int":600,"ress":601,"ations":602,"ail":603,"Ġ4":604,"ical":605,"Ġthem":606,"Ġher":607,"ount":608,"ĠCh":609,"Ġar":610,"Ġif":611,"Ġthere":612,"Ġpe":613,"Ġyear":614,"av":615,"Ġmy":616,"Ġsome":617,"Ġwhen":618,"ough":619,"ach":620,"Ġthan":621,"ru":622,"ond":623,"ick":624,"Ġover":625,"vel":626,"Ġqu":627,"ĊĊ":628,"Ġsc":629,"reat":630,"ree":631,"ĠIt":632,"ound":633,"port":634,"Ġalso":635,"Ġpart":636,"fter":637,"Ġkn":638,"Ġbec":639,"Ġtime":640,"ens":641,"Ġ5":642,"ople":643,"Ġwhat":644,"Ġno":645,"du":646,"mer":647,"ang":648,"Ġnew":649,"----":650,"Ġget":651,"ory":652,"ition":653,"ings":654,"Ġjust":655,"Ġinto":656,"Ġ0":657,"ents":658,"ove":659,"te":660,"Ġpeople":661,"Ġpre":662,"Ġits":663,"Ġrec":664,"Ġtw":665,"ian":666,"irst":667,"ark":668,"ors":669,"Ġwork":670,"ade":671,"ob":672,"Ġshe":673,"Ġour":674,"wn":675,"ink":676,"lic":677,"Ġ19":678,"ĠHe":679,"ish":680,"nder":681,"ause":682,"Ġhim":683,"ons":684,"Ġ[":685,"Ġro":686,"form":687,"ild":688,"ates":689,"vers":690,"Ġonly":691,"oll":692,"Ġspe":693,"ck":694,"ell":695,"amp":696,"Ġacc":697,"Ġbl":698,"ious":699,"urn":700,"ft":701,"ood":702,"Ġhow":703,"hed":704,"Ġ'":705,"Ġafter":706,"aw":707,"Ġatt":708,"ov":709,"ne":710,"Ġplay":711,"erv":712,"ict":713,"Ġcould":714,"itt":715,"Ġam":716,"Ġfirst":717,"Ġ6":718,"Ġact":719,"Ġ$":720,"ec":721,"hing":722,"ual":723,"ull":724,"Ġcomm":725,"oy":726,"old":727,"ces":728,"ater":729,"Ġfe":730,"Ġbet":731,"we":732,"iff":733,"Ġtwo":734,"ock":735,"Ġback":736,").":737,"ident":738,"Ġunder":739,"rough":740,"sel":741,"xt":742,"Ġmay":743,"round":744,"Ġpo":745,"ph":746,"iss":747,"Ġdes":748,"Ġmost":749,"Ġdid":750,"Ġadd":751,"ject":752,"Ġinc":753,"fore":754,"Ġpol":755,"ont":756,"Ġagain":757,"clud":758,"tern":759,"Ġknow":760,"Ġneed":761,"Ġcons":762,"Ġco":763,"Ġ.":764,"Ġwant":765,"Ġsee":766,"Ġ7":767,"ning"
:768,"iew":769,"ĠThis":770,"ced":771,"Ġeven":772,"Ġind":773,"ty":774,"ĠWe":775,"ath":776,"Ġthese":777,"Ġpr":778,"Ġuse":779,"Ġbecause":780,"Ġfl":781,"ng":782,"Ġnow":783,"ĠâĢĵ":784,"com":785,"ise":786,"Ġmake":787,"Ġthen":788,"ower":789,"Ġevery":790,"ĠUn":791,"Ġsec":792,"oss":793,"uch":794,"Ġem":795,"Ġ=":796,"ĠRe":797,"ied":798,"rit":799,"Ġinv":800,"lect":801,"Ġsupp":802,"ating":803,"Ġlook":804,"man":805,"pect":806,"Ġ8":807,"row":808,"Ġbu":809,"Ġwhere":810,"ific":811,"Ġyears":812,"ily":813,"Ġdiff":814,"Ġshould":815,"Ġrem":816,"Th":817,"In":818,"Ġev":819,"day":820,"'re":821,"rib":822,"Ġrel":823,"ss":824,"Ġdef":825,"Ġright":826,"Ġsy":827,"),":828,"les":829,"000":830,"hen":831,"Ġthrough":832,"ĠTr":833,"__":834,"Ġway":835,"Ġdon":836,"Ġ,":837,"Ġ10":838,"ased":839,"Ġass":840,"ublic":841,"Ġreg":842,"ĠAnd":843,"ix":844,"Ġvery":845,"Ġinclud":846,"other":847,"Ġimp":848,"oth":849,"Ġsub":850,"ĠâĢĶ":851,"Ġbeing":852,"arg":853,"ĠWh":854,"==":855,"ible":856,"Ġdoes":857,"ange":858,"ram":859,"Ġ9":860,"ert":861,"ps":862,"ited":863,"ational":864,"Ġbr":865,"Ġdown":866,"Ġmany":867,"aking":868,"Ġcall":869,"uring":870,"ities":871,"Ġph":872,"ics":873,"als":874,"Ġdec":875,"ative":876,"ener":877,"Ġbefore":878,"ility":879,"Ġwell":880,"Ġmuch":881,"erson":882,"Ġthose":883,"Ġsuch":884,"Ġke":885,"Ġend":886,"ĠBut":887,"ason":888,"ting":889,"Ġlong":890,"ef":891,"Ġthink":892,"ys":893,"Ġbel":894,"Ġsm":895,"its":896,"ax":897,"Ġown":898,"Ġprov":899,"Ġset":900,"ife":901,"ments":902,"ble":903,"ward":904,"Ġshow":905,"Ġpres":906,"ms":907,"omet":908,"Ġob":909,"Ġsay":910,"ĠSh":911,"ts":912,"ful":913,"Ġeff":914,"Ġgu":915,"Ġinst":916,"und":917,"ren":918,"cess":919,"Ġent":920,"ĠYou":921,"Ġgood":922,"Ġstart":923,"ince":924,"Ġmade":925,"tt":926,"stem":927,"olog":928,"up":929,"Ġ|":930,"ump":931,"Ġhel":932,"vern":933,"ular":934,"ually":935,"Ġac":936,"Ġmon":937,"Ġlast":938,"Ġ200":939,"10":940,"Ġstud":941,"ures":942,"ĠAr":943,"self":944,"ars":945,"meric":946,"ues":947,"cy":948,"Ġmin":949,"ollow":950,"Ġcol":951,"io":952,"Ġmod":953,"Ġcount":954,"ĠCom":955,"hes":956,"Ġfin":957,"air":958,"ier":959,"âĢĶ":960,"read":961,"ank":962,"atch":963,"ever":964,"Ġstr":965,"Ġpoint":966,"ork":967,"ĠNew":968,"Ġsur":969,"ool":970,"alk":971,"ement":972,"Ġused":973,"ract":974,"ween":975,"Ġsame":976,"oun":977,"ĠAl":978,"ci":979,"Ġdiffere":980,"Ġwhile":981,"--------":982,"Ġgame":983,"cept":984,"Ġsim":985,"...":986,"Ġinter":987,"ek":988,"Ġreport":989,"Ġprodu":990,"Ġstill":991,"led":992,"ah":993,"Ġhere":994,"Ġworld":995,"Ġthough":996,"Ġnum":997,"arch":998,"imes":999,"ale":1000,"ĠSe":1001,"ĠIf":1002,"//":1003,"ĠLe":1004,"Ġret":1005,"Ġref":1006,"Ġtrans":1007,"ner":1008,"ution":1009,"ters":1010,"Ġtake":1011,"ĠCl":1012,"Ġconf":1013,"way":1014,"ave":1015,"Ġgoing":1016,"Ġsl":1017,"ug":1018,"ĠAmeric":1019,"Ġspec":1020,"Ġhand":1021,"Ġbetween":1022,"ists":1023,"ĠDe":1024,"oot":1025,"It":1026,"Ġear":1027,"Ġagainst":1028,"Ġhigh":1029,"gan":1030,"az":1031,"ather":1032,"Ġexp":1033,"Ġop":1034,"Ġins":1035,"Ġgr":1036,"Ġhelp":1037,"Ġrequ":1038,"ets":1039,"ins":1040,"ĠPro":1041,"ism":1042,"Ġfound":1043,"land":1044,"ata":1045,"uss":1046,"ames":1047,"Ġperson":1048,"Ġgreat":1049,"pr":1050,"Ġsign":1051,"ĠAn":1052,"'ve":1053,"Ġsomet":1054,"Ġser":1055,"hip":1056,"Ġrun":1057,"Ġ:":1058,"Ġter":1059,"irect":1060,"Ġfollow":1061,"Ġdet":1062,"ices":1063,"Ġfind":1064,"12":1065,"Ġmem":1066,"Ġcr":1067,"ered":1068,"ex":1069,"Ġext":1070,"uth":1071,"ense":1072,"co":1073,"Ġteam":1074,"ving":1075,"ouse":1076,"ash":1077,"att":1078,"ved":1079,"Ġsystem":1080,"ĠAs":1081,"der":1082,"ives":1083,"min":1084,"Ġlead":
1085,"ĠBl":1086,"cent":1087,"Ġaround":1088,"Ġgovern":1089,"Ġcur":1090,"velop":1091,"any":1092,"Ġcour":1093,"alth":1094,"ages":1095,"ize":1096,"Ġcar":1097,"ode":1098,"Ġlaw":1099,"Ġread":1100,"'m":1101,"con":1102,"Ġreal":1103,"Ġsupport":1104,"Ġ12":1105,"....":1106,"Ġreally":1107,"ness":1108,"Ġfact":1109,"Ġday":1110,"Ġboth":1111,"ying":1112,"Ġserv":1113,"ĠFor":1114,"Ġthree":1115,"Ġwom":1116,"Ġmed":1117,"ody":1118,"ĠThey":1119,"50":1120,"Ġexper":1121,"ton":1122,"Ġeach":1123,"akes":1124,"Ġche":1125,"Ġcre":1126,"ines":1127,"Ġrep":1128,"19":1129,"gg":1130,"illion":1131,"Ġgrou":1132,"ute":1133,"ik":1134,"We":1135,"get":1136,"ER":1137,"Ġmet":1138,"Ġsays":1139,"ox":1140,"Ġduring":1141,"ern":1142,"ized":1143,"ared":1144,"Ġfam":1145,"ically":1146,"Ġhapp":1147,"ĠIs":1148,"Ġchar":1149,"med":1150,"vent":1151,"Ġgener":1152,"ient":1153,"ple":1154,"iet":1155,"rent":1156,"11":1157,"ves":1158,"ption":1159,"Ġ20":1160,"formation":1161,"Ġcor":1162,"Ġoffic":1163,"ield":1164,"Ġtoo":1165,"ision":1166,"Ġinf":1167,"ĠZ":1168,"the":1169,"oad":1170,"Ġpublic":1171,"Ġprog":1172,"ric":1173,"**":1174,"Ġwar":1175,"Ġpower":1176,"view":1177,"Ġfew":1178,"Ġloc":1179,"Ġdifferent":1180,"Ġstate":1181,"Ġhead":1182,"'ll":1183,"Ġposs":1184,"Ġstat":1185,"ret":1186,"ants":1187,"Ġval":1188,"Ġiss":1189,"Ġcle":1190,"ivers":1191,"anc":1192,"Ġexpl":1193,"Ġanother":1194,"ĠQ":1195,"Ġav":1196,"thing":1197,"nce":1198,"Wh":1199,"Ġchild":1200,"Ġsince":1201,"ired":1202,"less":1203,"Ġlife":1204,"Ġdevelop":1205,"ittle":1206,"Ġdep":1207,"Ġpass":1208,"ãĥ":1209,"Ġturn":1210,"orn":1211,"This":1212,"bers":1213,"ross":1214,"ĠAd":1215,"Ġfr":1216,"Ġresp":1217,"Ġsecond":1218,"oh":1219,"Ġ/":1220,"Ġdisc":1221,"Ġ&":1222,"Ġsomething":1223,"Ġcomple":1224,"Ġed":1225,"Ġfil":1226,"Ġmonth":1227,"aj":1228,"uc":1229,"Ġgovernment":1230,"Ġwithout":1231,"Ġleg":1232,"Ġdist":1233,"Ġput":1234,"Ġquest":1235,"ann":1236,"Ġprot":1237,"20":1238,"Ġnever":1239,"ience":1240,"Ġlevel":1241,"Ġart":1242,"Ġthings":1243,"Ġmight":1244,"Ġeffect":1245,"Ġcontro":1246,"Ġcent":1247,"Ġ18":1248,"Ġallow":1249,"Ġbelie":1250,"chool":1251,"ott":1252,"Ġincre":1253,"Ġfeel":1254,"Ġresult":1255,"Ġlot":1256,"Ġfun":1257,"ote":1258,"Ġty":1259,"erest":1260,"Ġcontin":1261,"Ġusing":1262,"Ġbig":1263,"201":1264,"Ġask":1265,"Ġbest":1266,"Ġ)":1267,"IN":1268,"Ġopp":1269,"30":1270,"Ġnumber":1271,"iness":1272,"St":1273,"lease":1274,"Ġca":1275,"Ġmust":1276,"Ġdirect":1277,"Ġgl":1278,"Ġ<":1279,"Ġopen":1280,"Ġpost":1281,"Ġcome":1282,"Ġseem":1283,"ording":1284,"Ġweek":1285,"ately":1286,"ital":1287,"Ġel":1288,"riend":1289,"Ġfar":1290,"Ġtra":1291,"inal":1292,"Ġpri":1293,"ĠUS":1294,"Ġplace":1295,"Ġform":1296,"Ġtold":1297,"\":":1298,"ains":1299,"ature":1300,"ĠTrump":1301,"Ġstand":1302,"Ġ#":1303,"ider":1304,"ĠFr":1305,"Ġnext":1306,"Ġsoc":1307,"Ġpur":1308,"Ġlet":1309,"Ġlittle":1310,"Ġhum":1311,"Ġi":1312,"ron":1313,"15":1314,"Ġ15":1315,"Ġcommun":1316,"Ġmark":1317,"ĠThere":1318,"Ġwr":1319,"ĠThat":1320,"Ġinformation":1321,"ways":1322,"Ġbus":1323,"app":1324,"Ġinvest":1325,"me":1326,"Ġhard":1327,"ained":1328,"ead":1329,"Ġimport":1330,"Ġappro":1331,"Ġtest":1332,"Ġtri":1333,"Ġrest":1334,"osed":1335,"Ġfull":1336,"Ġcare":1337,"ĠSp":1338,"Ġcase":1339,"ON":1340,"Ġsk":1341,"Ġless":1342,"Ġ+":1343,"Ġpartic":1344,"ĠPl":1345,"ably":1346,"uck":1347,"ished":1348,"chn":1349,"be":1350,"Ġlist":1351,"ator":1352,"Ġtop":1353,"Ġadv":1354,"ĠBe":1355,"ruct":1356,"Ġdem":1357,"ration":1358,"ling":1359,"gy":1360,"reen":1361,"ger":1362,"Ġhome":1363,"Ġleft":1364,"Ġbetter":1365,"Ġdata":1366,"Ġ11":1367,"Ġattack":1368,"Ġproble":1369,"line":1370,"ards":1371,"Ġbeh":
1372,"ral":1373,"ĠHow":1374,"ĠShe":1375,"arge":1376,"Ġ--":1377,"://":1378,"Ġbro":1379,"ĠPh":1380,"ats":1381,"Ġbuild":1382,"ww":1383,"ided":1384,"aim":1385,"ases":1386,"ency":1387,"Ġmain":1388,"ined":1389,"Ġincluding":1390,"Ġ{":1391,"Ġgot":1392,"Ġinterest":1393,"Ġkeep":1394,"ĠX":1395,"Ġeas":1396,"aining":1397,"Ġclass":1398,"âĢ¦":1399,"ĠNo":1400,"Ġvar":1401,"Ġsmall":1402,"ample":1403,"AT":1404,"Ġide":1405,"ĠSo":1406,"Ġrece":1407,"Ġpolit":1408,"Ġmov":1409,"Ġplan":1410,"Ġpercent":1411,"iving":1412,"Ġcamp":1413,"Ġpay":1414,"14":1415,"sc":1416,"ised":1417,"Ġunt":1418,"oney":1419,"ploy":1420,"====":1421,"Ġdidn":1422,"ĠInd":1423,"els":1424,"ertain":1425,"Ġpos":1426,"____":1427,"iver":1428,"Ġprocess":1429,"Ġprogram":1430,"ified":1431,"ĠRep":1432,"16":1433,"uro":1434,"ology":1435,"atter":1436,"ina":1437,"Ġname":1438,"ĠAll":1439,"Ġfour":1440,"Ġreturn":1441,"vious":1442,"bs":1443,"Ġcalled":1444,"Ġmove":1445,"ĠSc":1446,"ird":1447,"Ġgroup":1448,"Ġbre":1449,"Ġmen":1450,"Ġcap":1451,"ten":1452,"ee":1453,"Ġdri":1454,"leg":1455,"here":1456,"uthor":1457,"Ġpat":1458,"Ġcurrent":1459,"ides":1460,"Ġpop":1461,"to":1462,"ention":1463,"Ġalways":1464,"Ġmil":1465,"Ġwomen":1466,"Ġ16":1467,"Ġold":1468,"iven":1469,"raph":1470,"ĠOr":1471,"ror":1472,"ently":1473,"Ġnear":1474,"ĠEx":1475,"ream":1476,"sh":1477,"Ġ14":1478,"Ġfree":1479,"ission":1480,"stand":1481,"ĠCon":1482,"ality":1483,"used":1484,"13":1485,"Ġdesign":1486,"Ġchange":1487,"Ġchang":1488,"Ġbo":1489,"Ġvis":1490,"ember":1491,"Ġbook":1492,"ready":1493,"Ġkill":1494,"25":1495,"pped":1496,"Ġaway":1497,"Ġable":1498,"Ġcountry":1499,"Ġconst":1500,"arn":1501,"Ġorder":1502,"AR":1503,"ior":1504,"ium":1505,"orth":1506,"18":1507,"ailable":1508,"Ġsw":1509,"Ġmillion":1510,"Ġ13":1511,"atic":1512,"ted":1513,"ĠGo":1514,"Ġoper":1515,"eng":1516,"Ġthing":1517,"ajor":1518,"conom":1519,"ĠComm":1520,"Ġwhy":1521,"ured":1522,"ural":1523,"Ġschool":1524,"by":1525,"ĠMar":1526,"Ġaff":1527,"Ġdays":1528,"Ġann":1529,"ush":1530,"ane":1531,"If":1532,"eg":1533,"Ġprof":1534,"Ġhealth":1535,"outh":1536,"But":1537,"ional":1538,".,":1539,"Ġsol":1540,"Ġalready":1541,"Ġ30":1542,"Ġcharact":1543,"He":1544,"Ġfriend":1545,"ES":1546,"ians":1547,"icle":1548,"'d":1549,"ĠOn":1550,"Ġleast":1551,"Ġprom":1552,"Ġdr":1553,"Ġhist":1554,"ither":1555,"Ġest":1556,"iqu":1557,"17":1558,"son":1559,"Ġtell":1560,"Ġtalk":1561,"ohn":1562,"oint":1563,"lection":1564,"AN":1565,"Ġuntil":1566,"augh":1567,"Ġlater":1568,"Ġve":1569,"Ġview":1570,"ending":1571,"ived":1572,"Ġword":1573,"ware":1574,"Ġcost":1575,"Ġenough":1576,"Ġgive":1577,"ĠUnited":1578,"Ġtechn":1579,"arent":1580,"OR":1581,"Ġpar":1582,"ĠDr":1583,"Ġ2016":1584,"rist":1585,"ering":1586,"ĠÂ":1587,"Ġlarge":1588,"side":1589,"acy":1590,"ccess":1591,"Ġwin":1592,"Ġimportant":1593,"Ġ199":1594,"Ġdoesn":1595,"Ġ17":1596,"Ġbusiness":1597,"Ġclear":1598,"Ġrese":1599,"\",":1600,"ury":1601,"Ġequ":1602,"aster":1603,"alf":1604,"ĠAmerican":1605,"nect":1606,"Ġexpect":1607,"iversity":1608,"Ġocc":1609,"ĠFl":1610,"Ġkind":1611,"Ġmean":1612,"Ġpast":1613,"Ġdev":1614,"Ġbas":1615,"let":1616,"raft":1617,"Ġorgan":1618,"Ġdel":1619,"Ġperform":1620,"Ġstory":1621,"Ġseason":1622,"ĠCol":1623,"Ġclaim":1624,"Ġcame":1625,"Ġwithin":1626,"Ġline":1627,"Ġproject":1628,"ĠAt":1629,"Ġcontrol":1630,"ended":1631,"ĠSy":1632,"Ġair":1633,"ization":1634,"Ġ*":1635,"ley":1636,"Ġmoney":1637,"idd":1638,"You":1639,"for":1640,"Ġfamily":1641,"Ġmaking":1642,"Ġbit":1643,"Ġpolice":1644,"Ġhappen":1645,"Ġvers":1646,"ony":1647,"uff":1648,"ĠWhen":1649,"Ġsit":1650,"ideo":1651,"lf":1652,"ison":1653,"Ġsure":1654,"gin":1655,"Ġappear":1656,"Ġlight"
:1657,"Ġes":1658,"of":1659,"Ġwater":1660,"Ġtimes":1661,"not":1662,"Ġgrow":1663,"Ġcompany":1664,"ĠTe":1665,"ows":1666,"Ġmar":1667,"ource":1668,"iol":1669,"arm":1670,"br":1671,"Ġexample":1672,"Ġconc":1673,"Ġfore":1674,"ĠTo":1675,"pro":1676,"EN":1677,"ries":1678,"Ġ25":1679,"ĠCan":1680,"ney":1681,"Ġactually":1682,"Ġever":1683,"urity":1684,"aken":1685,"aps":1686,"Ġtax":1687,"Ġmajor":1688,"ama":1689,"Ġoften":1690,"eral":1691,"Ġhuman":1692,"Ġjob":1693,"ister":1694,"Ġavailable":1695,"ocr":1696,"enn":1697,"aid":1698,"ivid":1699,"Ġrecord":1700,"?\"":1701,"Ġsing":1702,"ĠAm":1703,"idence":1704,"Ġnews":1705,"ster":1706,"Ġeconom":1707,"Ġfollowing":1708,"ĠBr":1709,"ising":1710,"Ġhour":1711,"most":1712,"ument":1713,"Ġsex":1714,"Ġdesc":1715,"Ġbecome":1716,"ĠEd":1717,"Ġtook":1718,"Ġhaving":1719,"Ġproduct":1720,"ault":1721,"As":1722,"aring":1723,"Ġmeans":1724,"Ġhop":1725,"une":1726,"Ġcho":1727,"Ġcertain":1728,"Ġnon":1729,"Ġdeal":1730,"24":1731,"lement":1732,"oci":1733,"ene":1734,"Ġside":1735,"ĠPr":1736,"ĠMay":1737,"Ġreason":1738,"ued":1739,"ched":1740,"ulation":1741,"Ġelect":1742,"Ġofficial":1743,"Ġpossible":1744,"Ġhold":1745,"ands":1746,"ots":1747,"Ġcity":1748,"ories":1749,"Ġsever":1750,"Ġchildren":1751,"Ġonce":1752,"Ġactiv":1753,"ler":1754,"Ġnight":1755,"itions":1756,"ĠJohn":1757,"ape":1758,"play":1759,"Ġdone":1760,"Ġlim":1761,"Ġworking":1762,"ĠPres":1763,"orld":1764,"eb":1765,"ĠCo":1766,"Ġbody":1767,"ails":1768,"utes":1769,"ĠMr":1770,"Ġwhether":1771,"Ġauthor":1772,"rop":1773,"Ġproper":1774,"Ġseen":1775,");":1776,"Ġfac":1777,"ĠSu":1778,"Ġcond":1779,"iting":1780,"Ġcourse":1781,"Ġ}":1782,"----------------":1783,"aign":1784,"Ġevent":1785,"Ġeng":1786,"Ġpot":1787,"Ġintern":1788,"iam":1789,"Ġshort":1790,"empt":1791,"ãĤ":1792,"ĠGod":1793,"ilar":1794,"80":1795,"Ġorig":1796,"IS":1797,"ourn":1798,"ability":1799,"itive":1800,"Ġdam":1801,"Ġ100":1802,"Ġpress":1803,"Ġdoing":1804,"Ġprotect":1805,"ring":1806,"Ġthought":1807,"Ġquestion":1808,"rew":1809,"ĠWar":1810,"Ġseveral":1811,"ĠState":1812,"Ġgiven":1813,"Ġfund":1814,"ĠTw":1815,"Ġwent":1816,"ances":1817,"work":1818,"por":1819,"my":1820,"40":1821,"Ġarg":1822,"artment":1823,"ustom":1824,"Ġpolic":1825,"Ġmeet":1826,"Ġcreat":1827,"22":1828,"ĠStates":1829,"Ġgames":1830,"raw":1831,"uture":1832,"Ġunderstand":1833,"urs":1834,"ĠOb":1835,"lish":1836,"sy":1837,"Ġmakes":1838,"Ġwon":1839,"agon":1840,"Ġhtt":1841,"Ġlove":1842,"ential":1843,"Ġcomplete":1844,"par":1845,"ĠIm":1846,"AL":1847,"Ġaccount":1848,"Âł":1849,"ored":1850,"vert":1851,"Ġident":1852,"Ġ2015":1853,"Ġothers":1854,"ĠMin":1855,"iber":1856,"verage":1857,"There":1858,"itional":1859,"dd":1860,"Ġprob":1861,"Ġyoung":1862,"Ġalong":1863,"Ġaccording":1864,"Ġyet":1865,"Ġmembers":1866,"ĠWhat":1867,"oid":1868,"ĠMan":1869,"And":1870,"Ġamong":1871,"ai":1872,"Ġemploy":1873,"ĠRes":1874,"Ġ>":1875,"Ġinvol":1876,"Ġlow":1877,"af":1878,"ĠCar":1879,"Ġhig":1880,"ĠOne":1881,"ĠSec":1882,"ination":1883,"Ġlikely":1884,"Ġant":1885,"aged":1886,"ĠRuss":1887,"Ġben":1888,"Ġrele":1889,"For":1890,"back":1891,"ĠNot":1892,"Ġpresident":1893,"ball":1894,"Ġaccess":1895,"ividual":1896,"ĠDem":1897,"ĠEuro":1898,"60":1899,"Ġknown":1900,"irl":1901,"ĠGr":1902,"Ġearly":1903,"use":1904,"iety":1905,"âĢĵ":1906,"Ġfight":1907,"Ġsent":1908,"Ġtoday":1909,"Ġmarket":1910,"\".":1911,"Ġbased":1912,"Ġstrong":1913,"urther":1914,"Ġdeb":1915,"mber":1916,"Ġproblem":1917,"Ġdeath":1918,"Ġsocial":1919,"imate":1920,"AS":1921,"ortun":1922,"Ġcampaign":1923,"ery":1924,"Ch":1925,"Ġey":1926,"ially":1927,"Ġmus":1928,"wh":1929,"pos":1930,"Ġer":1931,"Ġsaf":1932,"Ġmonths":1933,"iron":1934,"Ġviol"
:1935,"Ġfive":1936,"Ġstre":1937,"Ġplayers":1938,"inc":1939,"ald":1940,"year":1941,"aun":1942,"Ġsuccess":1943,"Ġpresent":1944,"erence":1945,"Ġ2014":1946,"Ġsugg":1947,"Ġparticular":1948,"Ġtry":1949,"Ġsuggest":1950,"ĠChrist":1951,"ones":1952,"Ġpriv":1953,"23":1954,"Ġcrit":1955,"Ġland":1956,"Ġlocal":1957,"ify":1958,"29":1959,"Ġaut":1960,"ED":1961,"ĠGu":1962,"Ġmult":1963,"Ġpolitical":1964,"Ġasked":1965,"Ġformer":1966,"itter":1967,"ript":1968,"Ġclose":1969,"Ġpract":1970,"ĠYork":1971,"Ġgetting":1972,"Ġacross":1973,"Ġcomb":1974,"Ġbelieve":1975,"Ġz":1976,"Ġtoget":1977,"Ġtogether":1978,"ĠCent":1979,"irc":1980,"Ġindividual":1981,"ĠMc":1982,"27":1983,"isk":1984,"ĠEng":1985,"Ġface":1986,"Ġ24":1987,"Ġvalue":1988,"Ġarea":1989,"ev":1990,"Ġwrit":1991,"ĠPresident":1992,"Ġvot":1993,"Ġkey":1994,"Ġmom":1995,"put":1996,"Ġanything":1997,"Ġexperience":1998,"attle":1999,"Ġmind":2000,"aff":2001,"omm":2002,"Ġfuture":2003,"ged":2004,"Ġcut":2005,"Ġtot":2006,"itch":2007,"Ġvideo":2008,"Ġinvestig":2009,"Ġnet":2010,"ĠMy":2011,"rict":2012,"ien":2013,".)":2014,"Ġimpro":2015,"though":2016,"wards":2017,"Ġconnect":2018,"ĠMed":2019,"selves":2020,"ensive":2021,"mb":2022,"ober":2023,"ators":2024,"An":2025,"Ġ50":2026,"Ġredu":2027,"resent":2028,"Ġabove":2029,"Ġfre":2030,"ĠEurope":2031,"sw":2032,"Ġamount":2033,"ĠApp":2034,"Ġeither":2035,"Ġmilit":2036,"Ġanal":2037,"Ġfail":2038,"ĠEn":2039,"ales":2040,"Ġspecial":2041,"Ġblack":2042,"IT":2043,"cher":2044,"Ġlooking":2045,"Ġfire":2046,"yn":2047,"Ġalmost":2048,"oon":2049,"Ġstudy":2050,"Ġmiss":2051,"ches":2052,"rown":2053,"Ġtre":2054,"Ġcommunity":2055,"Ġmedia":2056,"Ġfood":2057,"Ġcomes":2058,"ĠUniversity":2059,"Ġsingle":2060,"What":2061,"uly":2062,"Ġhalf":2063,"ague":2064,"hod":2065,"ĠRepublic":2066,"Ġstarted":2067,"Ġquick":2068,"oto":2069,"book":2070,"Ġissue":2071,"itor":2072,"Ġelse":2073,"Ġconsider":2074,"26":2075,"rodu":2076,"Ġtaken":2077,"28":2078,"99":2079,"ĠWith":2080,"Ġtrue":2081,"Ġwa":2082,"Ġtrad":2083,"Ġago":2084,"Ġmess":2085,"ief":2086,"Ġadded":2087,"oke":2088,"Ġbad":2089,"Ġfav":2090,"33":2091,"Ġsimilar":2092,"ask":2093,"ĠDon":2094,"Ġcharacter":2095,"orts":2096,"ĠHouse":2097,"Ġreported":2098,"Ġtype":2099,"val":2100,"iod":2101,"ĠHowever":2102,"Ġtarg":2103,"Ġentire":2104,"pping":2105,"Ġhistory":2106,"Ġlive":2107,"ffic":2108,"........":2109,"ederal":2110,"Ġtrying":2111,"Ġdiscuss":2112,"ĠHar":2113,"aces":2114,"lished":2115,"Ġself":2116,"osp":2117,"rest":2118,"Ġroom":2119,"elt":2120,"Ġfall":2121,"olution":2122,"Ġet":2123,"Ġx":2124,"Ġisn":2125,"Ġidea":2126,"bo":2127,"Ġsound":2128,"ĠDep":2129,"Ġsomeone":2130,"cially":2131,"ully":2132,"Ġfoc":2133,"Ġobject":2134,"ift":2135,"aper":2136,"Ġplayer":2137,"Ġrather":2138,"Ġservice":2139,"ashing":2140,"ĠDo":2141,"ĠPart":2142,"rug":2143,"mon":2144,"ply":2145,"Ġmor":2146,"Ġnothing":2147,"Ġprovide":2148,"IC":2149,"ung":2150,"Ġparty":2151,"Ġexist":2152,"Ġmag":2153,"70":2154,"Ġrul":2155,"Ġhouse":2156,"Ġbehind":2157,"Ġhowever":2158,"ĠWorld":2159,"Ġsum":2160,"Ġapplic":2161,"Ġ;":2162,"Ġfunction":2163,"gr":2164,"ĠPol":2165,"Ġfront":2166,"200":2167,"Ġseries":2168,"Ġtem":2169,"Ġtyp":2170,"ills":2171,"Ġopt":2172,"Ġpoints":2173,"Ġbelow":2174,"itted":2175,"Ġspecific":2176,"Ġ2017":2177,"umb":2178,"Ġra":2179,"Ġprevious":2180,"Ġpret":2181,"reme":2182,"Ġcustom":2183,"Ġcourt":2184,"ĠMe":2185,"Ġrepl":2186,"Ġwhole":2187,"go":2188,"cer":2189,"Ġtreat":2190,"ĠAct":2191,"Ġprobably":2192,"Ġlearn":2193,"ender":2194,"ĠAss":2195,"Ġversion":2196,"now":2197,"Ġcheck":2198,"ĠCal":2199,"RE":2200,"minist":2201,"On":2202,"ources":2203,"Ġbenef":2204,"Ġdoc":2205,"Ġdeter":2206,"Ġenc":220
7,"Ġsuper":2208,"Ġaddress":2209,"Ġvict":2210,"Ġ2013":2211,"Ġmeas":2212,"tr":2213,"Ġfield":2214,"When":2215,"Ġsignific":2216,"uge":2217,"Ġfeat":2218,"Ġcommon":2219,"load":2220,"Ġbegin":2221,"Ġbring":2222,"Ġaction":2223,"erman":2224,"Ġdescrib":2225,"Ġindust":2226,"Ġwanted":2227,"ried":2228,"ming":2229,"Ġattempt":2230,"45":2231,"fer":2232,"Ġdue":2233,"ression":2234,"##":2235,"Ġshall":2236,"Ġsix":2237,"oo":2238,"Ġstep":2239,"Ġpub":2240,"Ġhimself":2241,"Ġ23":2242,"Ġcop":2243,"Ġdest":2244,"Ġstop":2245,"AC":2246,"ibility":2247,"Ġlab":2248,"icult":2249,"Ġhours":2250,"Ġcreate":2251,"Ġfurther":2252,"ĠAmerica":2253,"ĠCity":2254,"Ġdou":2255,"head":2256,"ST":2257,"ĠNorth":2258,"cing":2259,"Ġnational":2260,"ule":2261,"ĠInst":2262,"Ġtaking":2263,"ĠQu":2264,"irt":2265,"Ġred":2266,"Ġresearch":2267,"viron":2268,"ĠGe":2269,"Ġbreak":2270,"ana":2271,"Ġspace":2272,"aterial":2273,"Ġrecent":2274,"ĠAb":2275,"Ġgeneral":2276,"Ġhit":2277,"Ġperiod":2278,"Ġeverything":2279,"ively":2280,"Ġphys":2281,"Ġsaying":2282,"anks":2283,"Ġcou":2284,"Ġcult":2285,"aced":2286,"eal":2287,"uation":2288,"Ġcoun":2289,"lu":2290,"Ġinclude":2291,"Ġposition":2292,"ĠAfter":2293,"ĠCanad":2294,"ĠEm":2295,"Ġimm":2296,"ĠRed":2297,"Ġpick":2298,"Ġcompl":2299,"Ġmatter":2300,"reg":2301,"ext":2302,"angu":2303,"isc":2304,"ole":2305,"aut":2306,"Ġcompet":2307,"eed":2308,"fect":2309,"Ġ21":2310,"ĠSen":2311,"ĠThese":2312,"asing":2313,"Ġcannot":2314,"Ġinit":2315,"Ġrelations":2316,"ached":2317,"Ġbar":2318,"Ġ40":2319,"ĠTH":2320,"Ġ2012":2321,"Ġvol":2322,"Ġground":2323,"Ġsecurity":2324,"Ġupd":2325,"ilt":2326,"35":2327,"Ġconcern":2328,"ĠJust":2329,"Ġwhite":2330,"Ġseems":2331,"ĠHer":2332,"pecially":2333,"ients":2334,"Ġannoun":2335,"Ġfig":2336,"ights":2337,"Ġstri":2338,"like":2339,"ids":2340,"Ġsus":2341,"Ġwatch":2342,"Ġâ":2343,"Ġwind":2344,"ĠCont":2345,"Ġitself":2346,"Ġmass":2347,"Al":2348,"yle":2349,"ique":2350,"ĠNational":2351,"Ġabs":2352,"Ġpack":2353,"Ġoutside":2354,"Ġanim":2355,"Ġpain":2356,"eter":2357,"Ġmanag":2358,"duct":2359,"ogn":2360,"Ġ]":2361,"ĠSept":2362,"sec":2363,"off":2364,"ĠJan":2365,"Ġfoot":2366,"ades":2367,"Ġthird":2368,"Ġmot":2369,"Ġevidence":2370,"inton":2371,"Ġthreat":2372,"apt":2373,"ples":2374,"cle":2375,"Ġlo":2376,"Ġdecl":2377,"Ġitem":2378,"medi":2379,"Ġrepresent":2380,"omb":2381,"amer":2382,"Ġsignificant":2383,"ograph":2384,"su":2385,"Ġcal":2386,"ires":2387,"0000":2388,"ID":2389,"AM":2390,"Ġsimply":2391,"Ġlonger":2392,"Ġfile":2393,"OT":2394,"che":2395,"So":2396,"ateg":2397,"org":2398,"ĠHis":2399,"Ġener":2400,"Ġdom":2401,"Ġupon":2402,"ili":2403,"\":\"":2404,"Ġthemselves":2405,"Ġcoming":2406,"Ġquite":2407,"Ġdifficult":2408,"ĠBar":2409,"ilities":2410,"rel":2411,"ends":2412,"cial":2413,"64":2414,"Ġwoman":2415,"rap":2416,"yr":2417,"Ġnecess":2418,"ips":2419,"Ġtext":2420,"Ġrequire":2421,"Ġmilitary":2422,"Ġreview":2423,"Ġrespons":2424,"75":2425,"Ġsubject":2426,"Ġinstead":2427,"Ġissues":2428,"Ġgen":2429,"\",\"":2430,"Ġminutes":2431,"Ġweap":2432,"ray":2433,"amed":2434,"time":2435,"bl":2436,"How":2437,"Ġcode":2438,"ĠSm":2439,"Ġhigher":2440,"ĠSte":2441,"ris":2442,"Ġpage":2443,"Ġstudents":2444,"ĠIntern":2445,"Ġmethod":2446,"ĠAug":2447,"ĠPer":2448,"ĠAg":2449,"Ġpolicy":2450,"ĠSw":2451,"Ġexec":2452,"Ġaccept":2453,"ume":2454,"ribut":2455,"Ġwords":2456,"Ġfinal":2457,"Ġchanges":2458,"ĠDemocr":2459,"Ġfriends":2460,"Ġrespect":2461,"Ġep":2462,"Ġcompan":2463,"ivil":2464,"Ġdamage":2465,"****":2466,"ogle":2467,"vironment":2468,"Ġneg":2469,"ental":2470,"Ġap":2471,"Ġtotal":2472,"ival":2473,"!\"":2474,"lim":2475,"Ġneeds":2476,"Ġagre":2477,"Ġdevelopment":2478,"Ġage":2479
,"iple":2480,"21":2481,"Ġresults":2482,"ĠAf":2483,"Sh":2484,"Ġgun":2485,"ĠObama":2486,"roll":2487,"Ġ@":2488,"Ġrights":2489,"ĠBrit":2490,"Ġrunning":2491,"Ġwasn":2492,"Ġport":2493,"Ġrate":2494,"Ġpretty":2495,"Ġtarget":2496,"Ġsaw":2497,"Ġcirc":2498,"Ġworks":2499,"icro":2500,"alt":2501,"over":2502,"www":2503,"That":2504,"lier":2505,"Ġeveryone":2506,"ude":2507,"Ġpie":2508,"iddle":2509,"rael":2510,"Ġrad":2511,"Ġblock":2512,"Ġwalk":2513,"To":2514,"ãģ":2515,"nes":2516,"ĠAust":2517,"aul":2518,"rote":2519,"ĠSouth":2520,"ession":2521,"oph":2522,"Ġshows":2523,"Ġsite":2524,"Ġjo":2525,"Ġrisk":2526,"clus":2527,"lt":2528,"Ġinj":2529,"iding":2530,"ĠSpe":2531,"Ġchall":2532,"irm":2533,"Ġ22":2534,"itting":2535,"str":2536,"Ġhy":2537,"LE":2538,"key":2539,"Ġbegan":2540,"atur":2541,"ashington":2542,"lam":2543,"ĠDav":2544,"bit":2545,"Ġsize":2546,"ĠPar":2547,"38":2548,"ournal":2549,"face":2550,"Ġdecision":2551,"Ġlarg":2552,"Ġjud":2553,"rect":2554,"Ġcontinue":2555,"ĠOct":2556,"overed":2557,"ĠInt":2558,"========":2559,"Ġparent":2560,"ĠWill":2561,"Ġeasy":2562,"Ġdrug":2563,"anger":2564,"Ġsense":2565,"Ġdi":2566,"iday":2567,"Ġenergy":2568,"istic":2569,"Ġassoci":2570,"arter":2571,"obal":2572,"eks":2573,"ĠEl":2574,"urch":2575,"Ġgirl":2576,"oe":2577,"itle":2578,"Ġ28":2579,"ĠChe":2580,"Ġrequest":2581,"Ġsoon":2582,"Ġhost":2583,"ky":2584,"Ġstates":2585,"omes":2586,"Ġmaterial":2587,"lex":2588,"Ġmoment":2589,"Ġansw":2590,"onse":2591,"Ġespecially":2592,"Ġnorm":2593,"Ġservices":2594,"pite":2595,"ran":2596,"Ġrole":2597,"44":2598,"):":2599,"Ġcred":2600,"Cl":2601,"________":2602,"Ġmat":2603,"Ġlog":2604,"ĠClinton":2605,"OU":2606,"Ġoffice":2607,"Ġ26":2608,"Ġcharg":2609,"Ġtrack":2610,"ma":2611,"Ġheart":2612,"Ġball":2613,"Ġpersonal":2614,"Ġbuilding":2615,"na":2616,"set":2617,"body":2618,"ĠBlack":2619,"Ġincrease":2620,"itten":2621,"Ġneeded":2622,"36":2623,"32":2624,"=\"":2625,"Ġlost":2626,"Ġbecame":2627,"Ġgroups":2628,"ĠMus":2629,"Ġwrote":2630,"ĠPe":2631,"Ġprop":2632,"joy":2633,"é":2634,"ĠWhite":2635,"Ġdead":2636,".'":2637,"Ġhttp":2638,"Ġwebs":2639,"OS":2640,"Ġinside":2641,"Ġwrong":2642,"Ġstatement":2643,"Ġ...":2644,"yl":2645,"Ġfilm":2646,"Ġmusic":2647,"Ġshare":2648,"ification":2649,"Ġrelease":2650,"Ġforward":2651,"Ġstay":2652,"Ġcomput":2653,"itte":2654,"ser":2655,"Ġoriginal":2656,"Ġcard":2657,"Ġcand":2658,"Ġdiv":2659,"atural":2660,"Ġfavor":2661,"OM":2662,"Ġcases":2663,"uses":2664,"Ġsection":2665,"Ġleave":2666,"ging":2667,"oved":2668,"ĠWashington":2669,"39":2670,"ĠGl":2671,"Ġrequired":2672,"action":2673,"apan":2674,"oor":2675,"iter":2676,"ĠKing":2677,"Ġcountries":2678,"ĠGerman":2679,"lling":2680,"Ġ27":2681,"34":2682,"Ġquestions":2683,"Ġprim":2684,"Ġcell":2685,"Ġshoot":2686,"Ġanyone":2687,"ĠWest":2688,"Ġaffect":2689,"epend":2690,"Ġonline":2691,"ĠIsrael":2692,"ĠSeptember":2693,"Ġability":2694,"Ġcontent":2695,"ises":2696,"Ġreve":2697,"Ġlaun":2698,"Ġindic":2699,"Ġforce":2700,"cast":2701,"Ġsold":2702,"aving":2703,"fl":2704,"Ġsoft":2705,"Ġcompanies":2706,"ceed":2707,"Ġarticle":2708,"Ġaud":2709,"Ġrev":2710,"Ġeduc":2711,"Ġplaying":2712,"05":2713,"Ġheld":2714,"ctor":2715,"Ġreleased":2716,"Ġfederal":2717,"37":2718,"Ġadminist":2719,"Ġinterview":2720,"Ġinstall":2721,"Ġreceived":2722,"Ġsource":2723,"uk":2724,"Ph":2725,"Ġserious":2726,"Ġcreated":2727,"Ġcause":2728,"Ġimmedi":2729,"Ġdefin":2730,"uel":2731,"ĠDepartment":2732,"ctions":2733,"ĠCour":2734,"ĠNow":2735,"ze":2736,"ites":2737,"itution":2738,"Ġlate":2739,"Ġspeak":2740,"ners":2741,"Ġlegal":2742,"ari":2743,"ĠCor":2744,"Ġweeks":2745,"Ġmodel":2746,"Ġpred":2747,"Ġexact":2748,"BC":2749,"ĠBy":2750,"ING":2
751,"osing":2752,"Ġtakes":2753,"Ġregard":2754,"Ġopportun":2755,"Ġprice":2756,"Ġ198":2757,"ĠApr":2758,"fully":2759,"Ġord":2760,"Ġproblems":2761,"ruction":2762,"ham":2763,"ĠCount":2764,"lege":2765,"Ġleaders":2766,"ET":2767,"lev":2768,"Ġdeep":2769,"ological":2770,"ese":2771,"haps":2772,"ĠSome":2773,"Ġpers":2774,"Ġcontract":2775,"Ġrelationship":2776,"sp":2777,"oud":2778,"Ġbase":2779,"48":2780,"mit":2781,"Ad":2782,"ancial":2783,"Ġconsum":2784,"Ġpotential":2785,"Ġlangu":2786,"rem":2787,"eth":2788,"Ġrelig":2789,"ressed":2790,"66":2791,"Ġlink":2792,"Ġlower":2793,"ayer":2794,"ĠJune":2795,"Ġfem":2796,"unt":2797,"erc":2798,"urd":2799,"Ġcontact":2800,"Ġill":2801,"Ġmother":2802,"Ġestab":2803,"htt":2804,"ĠMarch":2805,"ĠBro":2806,"ĠChina":2807,"Ġ29":2808,"Ġsqu":2809,"Ġprovided":2810,"Ġaverage":2811,"asons":2812,"Ġ2011":2813,"Ġexam":2814,"lin":2815,"55":2816,"ned":2817,"Ġperfect":2818,"Ġtou":2819,"alse":2820,"ux":2821,"Ġbuy":2822,"Ġshot":2823,"Ġcollect":2824,"Ġphot":2825,"Ġplayed":2826,"Ġsurpr":2827,"Ġofficials":2828,"Ġsimple":2829,"avy":2830,"Ġindustry":2831,"Ġhands":2832,"ground":2833,"Ġpull":2834,"Ġround":2835,"Ġuser":2836,"Ġrange":2837,"uary":2838,"Ġprivate":2839,"ops":2840,"ees":2841,"Ġways":2842,"ĠMich":2843,"Ġveh":2844,"Ġexcept":2845,"Ġterms":2846,"imum":2847,"pper":2848,"ION":2849,"ores":2850,"ĠDragon":2851,"oul":2852,"Ġden":2853,"Ġperformance":2854,"Ġbill":2855,"cil":2856,"47":2857,"Ġenvironment":2858,"Ġexc":2859,"add":2860,"Ġworth":2861,"Ġpict":2862,"Ġchance":2863,"Ġ2018":2864,"bor":2865,"Ġspeed":2866,"iction":2867,"Ġalleg":2868,"ĠJapan":2869,"atory":2870,"reet":2871,"Ġmatch":2872,"ĠII":2873,"Ġstru":2874,"order":2875,"Ġste":2876,"Ġliving":2877,"Ġstruct":2878,"ino":2879,"Ġsepar":2880,"hern":2881,"Ġresponse":2882,"Ġenjoy":2883,"Ġvia":2884,"AD":2885,"uments":2886,"acebook":2887,"Ġmember":2888,"ibr":2889,"izing":2890,"Ġtool":2891,"ĠMon":2892,"ĠWhile":2893,"hood":2894,"ĠAng":2895,"ĠDef":2896,"Ġoffer":2897,"Tr":2898,"aur":2899,"Ġturned":2900,"ĠJuly":2901,"down":2902,"anced":2903,"Ġrecently":2904,"ĠEar":2905,"Ġce":2906,"ĠStar":2907,"ĠCong":2908,"rought":2909,"Ġblood":2910,"Ġhope":2911,"Ġcomment":2912,"aint":2913,"Ġarri":2914,"iles":2915,"Ġparticip":2916,"ought":2917,"ription":2918,"08":2919,"49":2920,"Ġgave":2921,"Ġselect":2922,"Ġkilled":2923,"sych":2924,"Ġgoes":2925,"ij":2926,"Ġcoll":2927,"Ġimpact":2928,"atives":2929,"ĠSer":2930,"09":2931,"ĠAugust":2932,"Ġboy":2933,"de":2934,"ĠDes":2935,"Ġfelt":2936,"US":2937,"Ġexpected":2938,"Ġimage":2939,"ĠMark":2940,"ccording":2941,"oice":2942,"EC":2943,"ĠMag":2944,"ened":2945,"hold":2946,"ĠPost":2947,"Ġprevent":2948,"No":2949,"Ġinvolved":2950,"Ġeyes":2951,"Ġquickly":2952,"At":2953,"unk":2954,"Ġbehav":2955,"Ġur":2956,"Ġled":2957,"come":2958,"ey":2959,"Ġcandid":2960,"Ġearlier":2961,"Ġfocus":2962,"ety":2963,"Pro":2964,"ledge":2965,"ixed":2966,"illed":2967,"Ġpopular":2968,"AP":2969,"Ġsett":2970,"light":2971,"Ġvarious":2972,"inks":2973,"Ġlevels":2974,"Ġroad":2975,"ellig":2976,"ables":2977,"hel":2978,"ittee":2979,"ĠGener":2980,"ype":2981,"Ġheard":2982,"icles":2983,"Ġmis":2984,"Ġusers":2985,"ĠSan":2986,"Ġimprove":2987,"Ġfather":2988,"Ġsearch":2989,"They":2990,"vil":2991,"Ġprofess":2992,"Ġknew":2993,"Ġloss":2994,"Ġevents":2995,"65":2996,"Ġbillion":2997,"07":2998,"02":2999,"ĠNews":3000,"ĠAM":3001,"Ġcover":3002,"where":3003,"ension":3004,"Ġbott":3005,"Ġareas":3006,"ences":3007,"ope":3008,"ĠTwitter":3009,"ael":3010,"Ġgets":3011,"ĠGoogle":3012,"Ġsn":3013,"iant":3014,"Ġvote":3015,"Ġnearly":3016,"Ġincluded":3017,"Ġrecogn":3018,"zz":3019,"mm":3020,"aled":3021,"Ġhappened":3022,"04
":3023,"Ġhot":3024,"Ġwhose":3025,"Ġcivil":3026,"Ġsuff":3027,"oes":3028,"itiz":3029,"ĠSyri":3030,"Ġrespond":3031,"Ġhon":3032,"Ġfeatures":3033,"Ġeconomic":3034,"ĠApril":3035,"rim":3036,"Ġtechnology":3037,"Ġoption":3038,"aging":3039,"Ġpurch":3040,"Re":3041,"Ġlat":3042,"chie":3043,"isl":3044,"Ġrecomm":3045,"uf":3046,"Ġtraining":3047,"Ġeffects":3048,"Ġfast":3049,"Ġ2010":3050,"Ġoccur":3051,"Ġwebsite":3052,"Ġemail":3053,"Ġsens":3054,"ech":3055,"Ġoil":3056,"Ġinflu":3057,"Ġcurrently":3058,"ĠSch":3059,"ĠAdd":3060,"Ġgoal":3061,"Ġscient":3062,"Ġconv":3063,"100":3064,"emy":3065,"Ġdecided":3066,"Ġtravel":3067,"Ġmention":3068,"LL":3069,"03":3070,"Ġelection":3071,"Ġphone":3072,"Ġlooks":3073,"Ġsituation":3074,"Ġcy":3075,"Ġhor":3076,"bed":3077,"ĠCourt":3078,"aily":3079,"aves":3080,"Ġquality":3081,"ĠComp":3082,"wise":3083,"Ġtable":3084,"Ġstaff":3085,"ĠWind":3086,"ett":3087,"Ġtried":3088,"idered":3089,"Ġaddition":3090,"Ġbox":3091,"Ġlack":3092,"arily":3093,"Ġwide":3094,"Ġmid":3095,"Ġboard":3096,"ysis":3097,"Ġanti":3098,"ha":3099,"Ġdig":3100,"ening":3101,"Ġdro":3102,"Con":3103,"68":3104,"Ġslow":3105,"based":3106,"sequ":3107,"Ġpath":3108,"Ex":3109,"aker":3110,"Ġworked":3111,"Ġpen":3112,"Ġengine":3113,"Ġlooked":3114,"ĠSuper":3115,"ĠServ":3116,"Ġvictim":3117,"Un":3118,"Ġproperty":3119,"Ġintrodu":3120,"Ġexecut":3121,"ĠPM":3122,"Le":3123,"Ġcolor":3124,"ĠMore":3125,"Ġ60":3126,"Ġnetwork":3127,"Ġdate":3128,"cul":3129,"idge":3130,"Ġextra":3131,"31":3132,"Ġsle":3133,"67":3134,"Ġwond":3135,"Ġreports":3136,"just":3137,"ĠAustral":3138,"Ġcapital":3139,"Ġens":3140,"Ġcommand":3141,"Ġallowed":3142,"Ġprep":3143,"Ġcapt":3144,"hib":3145,"Ġnumbers":3146,"chan":3147,"Ġfair":3148,"mp":3149,"oms":3150,"Ġreach":3151,"With":3152,"tain":3153,"Ġbroad":3154,"Ġcouple":3155,"ecause":3156,"lying":3157,"ĠFeb":3158,"Ġscreen":3159,"Ġlives":3160,"Ġprior":3161,"ĠCongress":3162,"Ar":3163,"Ġapproach":3164,"Ġemer":3165,"aries":3166,"ĠDis":3167,"serv":3168,"ĠNe":3169,"Ġbuilt":3170,"cies":3171,"Ġrepe":3172,"Ġrules":3173,"force":3174,"ĠPal":3175,"Ġfinancial":3176,"Ġconsidered":3177,"ĠChar":3178,"nces":3179,"ĠIS":3180,"Ġbrought":3181,"Ġbi":3182,"iers":3183,"ĠSim":3184,"OP":3185,"Ġproducts":3186,"Ġvisit":3187,"Ġdocument":3188,"Ġconduct":3189,"Ġcompletely":3190,"ining":3191,"ĠCalif":3192,"ibly":3193,"Ġwritten":3194,"ĠTV":3195,"ements":3196,"Ġdraw":3197,"One":3198,"Ġpublished":3199,"Ġsecret":3200,"rain":3201,"het":3202,"ĠFacebook":3203,"onday":3204,"ĠUp":3205,"Ġsexual":3206,"Ġthous":3207,"ĠPat":3208,"Ġess":3209,"Ġstandard":3210,"Ġarm":3211,"ges":3212,"ection":3213,"Ġfell":3214,"Ġforeign":3215,"ani":3216,"ĠFriday":3217,"Ġregular":3218,"inary":3219,"Ġincreased":3220,"Ġusually":3221,"Ġdemon":3222,"Ġdark":3223,"Ġadditional":3224,"rol":3225,"ĠOf":3226,"Ġproduction":3227,"!!":3228,"undred":3229,"Ġinternational":3230,"idents":3231,"ĠFree":3232,"roup":3233,"Ġrace":3234,"Ġmach":3235,"Ġhuge":3236,"All":3237,"lear":3238,"ovember":3239,"Ġtown":3240,"Ġattention":3241,"ĠOff":3242,"yond":3243,"ĠThen":3244,"field":3245,"Ġterror":3246,"raz":3247,"ĠBo":3248,"Ġmeeting":3249,"ĠPark":3250,"Ġarrest":3251,"Ġfear":3252,"Ġaw":3253,"ĠVal":3254,"oring":3255,"',":3256,"Ġextreme":3257,"arr":3258,"Ġworkers":3259,"After":3260,"Ġ31":3261,"net":3262,"ament":3263,"Ġdirectly":3264,"Ġpopulation":3265,"ube":3266,"ĠOctober":3267,"ĠIN":3268,"ĠJanuary":3269,"59":3270,"ĠDavid":3271,"Ġcross":3272,"cember":3273,"ĠFirst":3274,"Ġmessage":3275,"irit":3276,"Ġnation":3277,"Ġpoll":3278,"isions":3279,"Ġanswer":3280,"ny":3281,"isode":3282,"Ġcarry":3283,"ĠRussia":3284,"Ġhear":3285,"ength":3286,"roy":3287,"Ġna
tural":3288,"inally":3289,"Ġdog":3290,"mitted":3291,"Ġtrade":3292,"Ġsubst":3293,"Ġmultiple":3294,"ĠAfric":3295,"Ġfans":3296,"Ġsort":3297,"Ġglobal":3298,"ication":3299,"ĠWed":3300,"ara":3301,"Ġachie":3302,"Ġlanguage":3303,"vey":3304,"Ġtal":3305,"Ġnecessary":3306,"Ġdetails":3307,"Ġsen":3308,"ĠSund":3309,"ĠReg":3310,"ĠRec":3311,"06":3312,"Ġsil":3313,"ressive":3314,"Ġmedical":3315,"unch":3316,"ornia":3317,"Ġund":3318,"fort":3319,"ocks":3320,"ĠMonday":3321,"uesday":3322,"craft":3323,"77":3324,"urt":3325,"Ġver":3326,"ĠHill":3327,"Ġreceive":3328,"Ġmorning":3329,"estern":3330,"Ġbank":3331,"Ġsat":3332,"irth":3333,"ĠHigh":3334,"Ġdevice":3335,"ĠTHE":3336,"ĠCenter":3337,"Ġsafe":3338,"Ġple":3339,"ĠCanada":3340,"Ġsystems":3341,"Ġassist":3342,"Ġsurv":3343,"Ġbattle":3344,"ĠSoc":3345,"vertis":3346,"She":3347,"Ġpaper":3348,"Ġgrowth":3349,"Ġcast":3350,"Sc":3351,"Ġplans":3352,"lled":3353,"Ġparts":3354,"Ġwall":3355,"Ġmovement":3356,"Ġpractice":3357,"imately":3358,"Ġdisplay":3359,"Ġsometimes":3360,"omp":3361,"ĠPaul":3362,"ĠYes":3363,"king":3364,"58":3365,"oly":3366,"Ġson":3367,"Ġavoid":3368,"okes":3369,"ĠJew":3370,"Ġtowards":3371,"asc":3372,"Ġ//":3373,"ĠKore":3374,"Ġtalking":3375,"Ġcorrect":3376,"Ġspent":3377,"icks":3378,"iable":3379,"eared":3380,"Ġterm":3381,"Ġwants":3382,"oming":3383,"Ġut":3384,"Ġdoub":3385,"Ġforces":3386,"Ġplease":3387,"69":3388,"ĠNovember":3389,"atform":3390,"ondon":3391,"Ġones":3392,"Ġimmediately":3393,"ĠRussian":3394,"ĠMet":3395,"Ġdeg":3396,"Ġparents":3397,"CH":3398,"ĠAmericans":3399,"aly":3400,"ĠMod":3401,"Ġshown":3402,"Ġconditions":3403,"Ġstuff":3404,"Ġreb":3405,"ĠYour":3406,"Ġincludes":3407,"nown":3408,"ĠSam":3409,"Ġexperien":3410,"mission":3411,"ĠEven":3412,"aught":3413,"Ġannounced":3414,"ĠRepublican":3415,"Ġdetermin":3416,"Ġdescribed":3417,"ĠCounty":3418,"()":3419,"Ġdoor":3420,"Ġchanged":3421,"Ġneigh":3422,"ĠHere":3423,"Ġclean":3424,"Ġpan":3425,"ĠDecember":3426,"ĠEuropean":3427,"iring":3428,"apter":3429,"Ġclub":3430,"ĠTuesday":3431,"Ġpaid":3432,"ĠNet":3433,"Ġattacks":3434,"Ġcharacters":3435,"Ġalone":3436,"Ġdirector":3437,"dom":3438,"Ġ35":3439,"Ġload":3440,"Ġrout":3441,"ĠCalifornia":3442,"Ġfinally":3443,"Ġrac":3444,"Ġcontr":3445,"Ġexactly":3446,"resh":3447,"pri":3448,"ĠIslam":3449,"Ġnature":3450,"Ġcareer":3451,"Ġlatest":3452,"Ġconvers":3453,"ĠSl":3454,"pose":3455,"cient":3456,"ĠInc":3457,"ivity":3458,"88":3459,"ĠAtt":3460,"ĠMor":3461,"nesday":3462,"Ġweight":3463,"ken":3464,"Ġnote":3465,"Ġteams":3466,"Ġ\\":3467,"airs":3468,"ĠGreen":3469,"Ġhundred":3470,"onent":3471,"Ġstreng":3472,"Ġconsist":3473,"icated":3474,"Ġregul":3475,"Ġlic":3476,"astic":3477,"Ġten":3478,"ursday":3479,"elligence":3480,"ously":3481,"ĠUK":3482,"BI":3483,"Ġcosts":3484,"Ġindepend":3485,"ĠAP":3486,"Ġnormal":3487,"Ġhom":3488,"Ġobvious":3489,"Ġswe":3490,"Ġstar":3491,"Ġready":3492,"acher":3493,"Ġimplement":3494,"gest":3495,"Ġsong":3496,"ĠGet":3497,"ĠLab":3498,"Ġinteresting":3499,"using":3500,"Ġgiving":3501,"ĠSunday":3502,"Ġetc":3503,"Ġmiddle":3504,"Ġremember":3505,"right":3506,"osition":3507,"utions":3508,"Ġmax":3509,"46":3510,"Ġyourself":3511,"Ġdemand":3512,"Ġtreatment":3513,"Ġdanger":3514,"ĠCons":3515,"Ġguy":3516,"ĠBritish":3517,"Ġphysical":3518,"Ġrelated":3519,"Ġremain":3520,"Ġcouldn":3521,"Ġrefer":3522,"Ġcitiz":3523,"box":3524,"ENT":3525,"board":3526,"Ġinn":3527,"IG":3528,"ero":3529,"ĠStreet":3530,"ospital":3531,"rench":3532,"chers":3533,"Ġstra":3534,"OL":3535,"ager":3536,"ĠAN":3537,"Ġeasily":3538,"IA":3539,"enge":3540,"iny":3541,"Ġclos":3542,"ocked":3543,"Ġuses":3544,"ĠCoun":3545,"Im":3546,"uild":3547,"??":3548,"more"
:3549,"Ġang":3550,"Ġwrite":3551,"olute":3552,"57":3553,"Ġleader":3554,"Ġreading":3555,"":3784,"Ġfigure":3785,"Ġdisapp":3786,"enty":3787,"Ġsoftware":3788,"Ġult":3789,"Ġofficers":3790,"New":3791,"Is":3792,"Ġremains":3793,"ĠIndia":3794,"Ġpsych":3795,"rief":3796,"Ġcat":3797,"esc":3798,"Ġobserv":3799,"Ġstage":3800,"ĠDark":3801,"Ġenter":3802,"change":3803,"Ġpassed":3804,"Ġdespite":3805,"ĠOut":3806,"Ġmovie":3807,"rs":3808,"Ġvoice":3809,"mine":3810,"ĠPlay":3811,"Ġtoward":3812,"ĠTer":3813,"Ġregion":3814,"Ġvalues":3815,"orters":3816,"Ġmount":3817,"Ġofficer":3818,"ĠOther":3819,"ban":3820,"Ġhous":3821,"wood":3822,"room":3823,"IV":3824,"ĠSun":3825,"see":3826,"ĠOver":3827,"rog":3828,"90":3829,"Ġlay":3830,"ĠTur":3831,"awn":3832,"Ġpressure":3833,"ĠSub":3834,"Ġbooks":3835,"edom":3836,"ĠSand":3837,"AA":3838,"ago":3839,"Ġreasons":3840,"ford":3841,"Ġactivity":3842,"UT":3843,"Now":3844,"ĠSenate":3845,"cell":3846,"night":3847,"Ġcalls":3848,"inter":3849,"Ġletter":3850,"ĠRob":3851,"ĠJe":3852,"Ġchoose":3853,"ĠLaw":3854,"Get":3855,"Be":3856,"Ġrob":3857,"Ġtypes":3858,"Ġplatform":3859,"Ġquarter":3860,"RA":3861,"ĠTime":3862,"Ġmaybe":3863,"ĠCr":3864,"95":3865,"pre":3866,"Ġmoving":3867,"Ġlif":3868,"Ġgold":3869,"Ġsom":3870,"Ġpatients":3871,"Ġtruth":3872,"ĠKe":3873,"urance":3874,"antly":3875,"mar":3876,"Ġcharge":3877,"ĠGreat":3878,"Ġcele":3879,"--------------------------------":3880,"Ġrock":3881,"roid":3882,"ancy":3883,"Ġcredit":3884,"aud":3885,"By":3886,"ĠEvery":3887,"Ġmoved":3888,"inger":3889,"ribution":3890,"Ġnames":3891,"Ġstraight":3892,"ĠHealth":3893,"ĠWell":3894,"Ġfeature":3895,"Ġrule":3896,"Ġsche":3897,"inated":3898,"ĠMichael":3899,"berg":3900,"41":3901,"iled":3902,"band":3903,"Ġclick":3904,"ĠAngel":3905,"onents":3906,"ÂŃ":3907,"ĠIraq":3908,"ĠSaturday":3909,"Ġaware":3910,"part":3911,"Ġpattern":3912,"OW":3913,"ĠLet":3914,"Ġgrad":3915,"igned":3916,"Ġassociated":3917,"Ġstyle":3918,"no":3919,"iation":3920,"aith":3921,"ilies":3922,"Ġstories":3923,"uration":3924,"Ġindividuals":3925,"ĠâĢ¦":3926,"miss":3927,"ĠAssoci":3928,"ishing":3929,"aby":3930,"Ġsummer":3931,"ĠBen":3932,"Ġ32":3933,"Ġarch":3934,"uty":3935,"ĠTexas":3936,"hol":3937,"Ġfully":3938,"Ġmill":3939,"Ġfollowed":3940,"ĠBill":3941,"ĠIndian":3942,"ĠSecret":3943,"ĠBel":3944,"ĠFebruary":3945,"Ġjobs":3946,"Ġseemed":3947,"ĠGovern":3948,"ipped":3949,"Ġreality":3950,"Ġlines":3951,"Ġpark":3952,"Ġmeasure":3953,"ĠOur":3954,"IM":3955,"Ġbrother":3956,"Ġgrowing":3957,"Ġban":3958,"Ġestim":3959,"Ġcry":3960,"ĠSchool":3961,"Ġmechan":3962,"ĠOF":3963,"ĠWindows":3964,"Ġrates":3965,"ĠOh":3966,"Ġpositive":3967,"Ġculture":3968,"istics":3969,"ica":3970,"Ġhar":3971,"ya":3972,"itely":3973,"ipp":3974,"Ġmap":3975,"encies":3976,"ĠWilliam":3977,"II":3978,"akers":3979,"56":3980,"ĠMart":3981,"ĠRem":3982,"Ġaltern":3983,"itude":3984,"Ġcoach":3985,"rowd":3986,"Don":3987,"Ġkids":3988,"Ġjournal":3989,"Ġcorpor":3990,"Ġfalse":3991,"Ġweb":3992,"Ġsleep":3993,"Ġcontain":3994,"Ġsto":3995,"Ġbed":3996,"iverse":3997,"ĠRich":3998,"ĠChinese":3999,"Ġpun":4000,"Ġmeant":4001,"known":4002,"Ġnotice":4003,"Ġfavorite":4004,"aven":4005,"Ġcondition":4006,"Ġpurpose":4007,"))":4008,"Ġorganization":4009,"Ġchalleng":4010,"Ġmanufact":4011,"Ġsusp":4012,"ĠAc":4013,"Ġcritic":4014,"unes":4015,"uclear":4016,"Ġmer":4017,"vention":4018,"Ġ80":4019,"Ġmist":4020,"ĠUs":4021,"ĠTor":4022,"http":4023,"olf":4024,"Ġlarger":4025,"Ġadvant":4026,"Ġresear":4027,"Ġactions":4028,"ml":4029,"Ġkept":4030,"Ġaim":4031,",'":4032,"col":4033,"Ġbenefits":4034,"ifying":4035,"Ġactual":4036,"ĠInternational":4037,"Ġvehicle":4038,"Ġchief":4039,"Ġefforts":4040,"ĠL
eague":4041,"ĠMost":4042,"Ġwait":4043,"Ġadult":4044,"Ġoverall":4045,"Ġspeech":4046,"Ġhighly":4047,"Ġfemale":4048,"Ġerror":4049,"Ġeffective":4050,"54":4051,"Ġencour":4052,"well":4053,"Ġfailed":4054,"Ġconserv":4055,"Ġprograms":4056,"Ġtrou":4057,"Ġahead":4058,"500":4059,"vertisement":4060,"IP":4061,"ĠFound":4062,"pir":4063,"Ġ%":4064,"Ġcrime":4065,"ander":4066,"Ġlocation":4067,"ĠIran":4068,"Ġbehavior":4069,"azing":4070,"Ġrare":4071,"Ġemb":4072,"Ġcaused":4073,"Ġship":4074,"Ġactive":4075,"Ġcontribut":4076,"Ġgreen":4077,"Ġacqu":4078,"Ġreflect":4079,"venue":4080,"Ġfirm":4081,"Ġbirth":4082,"].":4083,"Ġclearly":4084,"Ġemot":4085,"Ġagency":4086,"riage":4087,"Ġmemory":4088,"98":4089,"SA":4090,"ĠSee":4091,"acing":4092,"CC":4093,"Ġbiggest":4094,"Ġrap":4095,"Ġbasic":4096,"Ġband":4097,"eat":4098,"Ġsuspect":4099,"ĠMac":4100,"Ġ90":4101,"mark":4102,"istan":4103,"Ġspread":4104,"ams":4105,"ki":4106,"asy":4107,"rav":4108,"ĠRober":4109,"Ġdemonstr":4110,"rated":4111,"Ġabsolute":4112,"Ġplaces":4113,"Ġimpl":4114,"ibrary":4115,"Ġcards":4116,"Ġdestroy":4117,"Ġvirt":4118,"vere":4119,"Ġappeared":4120,"yan":4121,"point":4122,"Ġbeg":4123,"Ġtemper":4124,"spe":4125,"anted":4126,"ears":4127,"ĠDirect":4128,"Ġlength":4129,"Ġblog":4130,"amb":4131,"Ġinteg":4132,"Ġresources":4133,"acc":4134,"iful":4135,"Ġspot":4136,"Ġforced":4137,"Ġthousands":4138,"ĠMinister":4139,"Ġqual":4140,"ĠFrench":4141,"atically":4142,"Ġgenerally":4143,"Ġdrink":4144,"Ġthus":4145,"IL":4146,"odes":4147,"Ġappropri":4148,"ĠRead":4149,"Ġwhom":4150,"Ġeye":4151,"Ġcollege":4152,"Ġ45":4153,"irection":4154,"Ġensure":4155,"Ġapparent":4156,"iders":4157,"Ġreligious":4158,"Ġminor":4159,"olic":4160,"Ġtro":4161,"ĠWhy":4162,"ribute":4163,"met":4164,"Ġprimary":4165,"Ġdeveloped":4166,"Ġpeace":4167,"Ġskin":4168,"ste":4169,"ava":4170,"Ġblue":4171,"Ġfamilies":4172,"Ġir":4173,"Ġapply":4174,"Ġinform":4175,"ĠSmith":4176,"CT":4177,"ii":4178,"Ġlimit":4179,"Ġresist":4180,"................":4181,"umn":4182,"Ġconflic":4183,"Ġtwe":4184,"udd":4185,"ĠTom":4186,"Ġliter":4187,"que":4188,"bon":4189,"Ġhair":4190,"Ġeventually":4191,"Ġpus":4192,"Ġhelped":4193,"Ġagg":4194,"orney":4195,"ĠApple":4196,"Ġfit":4197,"ĠSur":4198,"Ġprem":4199,"Ġsales":4200,"Ġseconds":4201,"Ġstrength":4202,"Ġfeeling":4203,"¿½":4204,"Ġtour":4205,"Ġknows":4206,"oom":4207,"Ġexerc":4208,"Ġsomew":4209,"�":4210,">>":4211,"Ġspokes":4212,"Ġideas":4213,"Ġregist":4214,"soft":4215,"ĠDel":4216,"ĠPC":4217,"Ġpropos":4218,"Ġlaunch":4219,"Ġbottom":4220,"TH":4221,"ĠPlease":4222,"vest":4223,"itz":4224,"ĠInter":4225,"Ġscript":4226,"Ġrat":4227,"arning":4228,"Ġil":4229,"ĠJer":4230,"ĠAre":4231,"Ġwhatever":4232,"oken":4233,"cience":4234,"Ġmode":4235,"Ġagree":4236,"Ġsources":4237,"Ġinitial":4238,"Ġrestrict":4239,"Ġwonder":4240,"usion":4241,"####":4242,"ĠSil":4243,"ville":4244,"Ġburn":4245,"tw":4246,"asion":4247,"Ġ£":4248,"Ġnor":4249,"uing":4250,"Ġreached":4251,"Ġsun":4252,"Ġcateg":4253,"igration":4254,"Ġcook":4255,"Ġpromot":4256,"Ġmale":4257,"Ġclimate":4258,"Ġfix":4259,"Ġalleged":4260,"UR":4261,"alled":4262,"Ġimages":4263,"Cont":4264,"ota":4265,"Ġschools":4266,"ios":4267,"Ġdrop":4268,"Ġstream":4269,"ĠMo":4270,"Ġpreviously":4271,"aling":4272,"Ġpet":4273,"Ġdouble":4274,"Ġ(@":4275,"annel":4276,"Ġdefault":4277,"ties":4278,"Ġrank":4279,"ĠDec":4280,"ĠCouncil":4281,"Ġweapon":4282,"Ġstock":4283,"Ġanaly":4284,"ĠStr":4285,"Ġpicture":4286,"ĠPolice":4287,"ference":4288,"Ġcentury":4289,"Ġcitizens":4290,"Ġonto":4291,"Ġexpand":4292,"Ġhero":4293,"ĠSol":4294,"Ġwild":4295,"Ġupdate":4296,"Ġcustomers":4297,"ront":4298,"def":4299,"Ġlik":4300,"Ġcriminal":4301,"ĠChris
tian":4302,"SP":4303,"76":4304,"Ġleaving":4305,"Ġotherwise":4306,"ĠDist":4307,"Ġbasis":4308,"52":4309,"53":4310,"icip":4311,"ĠBer":4312,"Ġrecommend":4313,"Ġfloor":4314,"Ġcrowd":4315,"oles":4316,"Ġ70":4317,"Ġcentral":4318,"ĠEv":4319,"Ġdream":4320,"Ġdownload":4321,"Ġconfir":4322,"ĠThom":4323,"Ġwindow":4324,"Ġhappens":4325,"Ġunit":4326,"Ġtend":4327,"Ġspl":4328,"Ġbecomes":4329,"Ġfighting":4330,"Ġpredict":4331,"ĠPress":4332,"ĠPower":4333,"Ġheavy":4334,"aked":4335,"Ġfan":4336,"orter":4337,"ategy":4338,"BA":4339,"izes":4340,"Ġspend":4341,"Here":4342,"Ġ2007":4343,"Ġadop":4344,"ĠHam":4345,"Ġfootball":4346,"ĠPort":4347,"oday":4348,"51":4349,"ampions":4350,"Ġtransfer":4351,"ht":4352,"Ġ38":4353,"term":4354,"acity":4355,"Ġbur":4356,"],":4357,"ternal":4358,"rig":4359,"but":4360,"Ġtherefore":4361,"ĠBecause":4362,"resp":4363,"rey":4364,"Ġmission":4365,"Some":4366,"Ġnoted":4367,"Ġassum":4368,"Ġdisease":4369,"Ġedit":4370,"Ġprogress":4371,"rd":4372,"ĠBrown":4373,"ocal":4374,"Ġadding":4375,"Ġraised":4376,"ĠAny":4377,"Ġtick":4378,"Ġseeing":4379,"ĠPeople":4380,"Ġagreement":4381,"Ġserver":4382,"Ġwat":4383,"Ġdebate":4384,"Ġsupposed":4385,"iling":4386,"Ġlargest":4387,"Ġsuccessful":4388,"ĠPri":4389,"ĠDemocratic":4390,"Ġjump":4391,"ĠSyria":4392,"Ġowners":4393,"Ġoffers":4394,"Ġshooting":4395,"Ġeffic":4396,"sey":4397,"Ġhaven":4398,"verse":4399,"tered":4400,"ĠLight":4401,"imal":4402,"ĠBig":4403,"Ġdefend":4404,"Ġbeat":4405,"Ġrecords":4406,"%)":4407,"Ġscen":4408,"Ġemployees":4409,"Ġdevices":4410,"hem":4411,"Ġcommer":4412,"ĠMex":4413,"Ġbenefit":4414,"ĠProf":4415,"Ġilleg":4416,"Ġsurface":4417,"ĠAlso":4418,"Ġharm":4419,"ingly":4420,"wide":4421,"ĠAlex":4422,"Ġshut":4423,"ĠCur":4424,"Ġlose":4425,"pm":4426,"Ġchallenge":4427,"semb":4428,"Ġstation":4429,"Ġintelligence":4430,"Ġaccur":4431,"ĠFlor":4432,"Ġrequires":4433,"ĠMal":4434,"bum":4435,"Ġhospital":4436,"Ġspirit":4437,"Ġoffered":4438,"Ġproduce":4439,"ĠCommun":4440,"Ġcreating":4441,"Ġcris":4442,"spect":4443,"Ġended":4444,"Ġdaily":4445,"Ġvoters":4446,"lands":4447,"ias":4448,"ih":4449,"ona":4450,"Ġsmart":4451,"ĠOffice":4452,"ĠLord":4453,"rial":4454,"ĠInternet":4455,"Ġcircum":4456,"Ġextremely":4457,"'.":4458,"Ġopinion":4459,"ĠMil":4460,"Ġgain":4461,"BS":4462,"ĠFin":4463,"yp":4464,"Ġuseful":4465,"Ġbudget":4466,"Ġcomfort":4467,"isf":4468,"Ġbackground":4469,"eline":4470,"Ġepisode":4471,"Ġenemy":4472,"Ġtrial":4473,"Ġestablish":4474,"date":4475,"ĠCap":4476,"Ġcontinues":4477,"Ġshowing":4478,"ĠUnion":4479,"with":4480,"Ġposted":4481,"ĠSystem":4482,"Ġeat":4483,"rian":4484,"Ġrise":4485,"ĠGermany":4486,"ils":4487,"Ġsigned":4488,"Ġvill":4489,"Ġgrand":4490,"mor":4491,"ĠEngland":4492,"Ġprojects":4493,"umber":4494,"Ġconference":4495,"za":4496,"Ġresponsible":4497,"ĠArab":4498,"Ġlearned":4499,"âĢĶâĢĶ":4500,"ipping":4501,"ĠGeorge":4502,"OC":4503,"Ġreturned":4504,"ĠAustralia":4505,"Ġbrief":4506,"Qu":4507,"Ġbrand":4508,"illing":4509,"abled":4510,"Ġhighest":4511,"Ġtrain":4512,"ĠCommission":4513,"while":4514,"Ġnom":4515,"ception":4516,"Ġmut":4517,"ĠBlue":4518,"Ġincident":4519,"vant":4520,"86":4521,"ĠID":4522,"Ġnuclear":4523,"74":4524,"ĠLike":4525,"ĠRE":4526,"ĠMicro":4527,"li":4528,"mail":4529,"Ġcharges":4530,"89":4531,"Ġadjust":4532,"ado":4533,"Ġearth":4534,"NA":4535,"Ġprices":4536,"PA":4537,"Ġdraft":4538,"Ġruns":4539,"Ġcandidate":4540,"enses":4541,"Ġmanagement":4542,"ĠPhil":4543,"ĠMiss":4544,"Ġteach":4545,"gram":4546,"Ġunderstanding":4547,"ait":4548,"icago":4549,"Add":4550,"ĠEp":4551,"secut":4552,"Ġseparate":4553,"Ġinstance":4554,"Ġeth":4555,"Ġunless":4556,"********":4557,"ĠFore":4558,"inate":4559,"Ġo
perations":4560,"Sp":4561,"Ġfaith":4562,"gar":4563,"ĠChurch":4564,"ronic":4565,"Ġconfig":4566,"osure":4567,"Ġactivities":4568,"Ġtraditional":4569,"Ġ36":4570,"Ġdirection":4571,"Ġmachine":4572,"Ġsurround":4573,"Ġpush":4574,"unction":4575,"ĠEU":4576,"Ġeasier":4577,"Ġargument":4578,"GB":4579,"Ġmicro":4580,"Ġspending":4581,"izations":4582,"Ġtheory":4583,"adow":4584,"Ġcalling":4585,"ĠLast":4586,"Ġder":4587,"Ġinfluence":4588,"Ġcommit":4589,"Ġphoto":4590,"Ġunc":4591,"istry":4592,"gn":4593,"aste":4594,"acks":4595,"Ġdisp":4596,"ady":4597,"do":4598,"ĠGood":4599,"Ġ`":4600,"Ġwish":4601,"Ġrevealed":4602,"³³":4603,"lig":4604,"Ġenforce":4605,"ĠCommittee":4606,"Ġchem":4607,"Ġmiles":4608,"Ġinterested":4609,"Ġsolution":4610,"icy":4611,"inct":4612,"Ġ->":4613,"ĠDet":4614,"Ġremoved":4615,"Ġcompar":4616,"eah":4617,"Ġplant":4618,"ĠSince":4619,"Ġachieve":4620,"Ġadvantage":4621,"Ġslightly":4622,"bing":4623,"Ġplaced":4624,"under":4625,"2015":4626,"ĠMad":4627,"Ġtim":4628,"oses":4629,"Ġcru":4630,"ĠRock":4631,"Ġmostly":4632,"Ġnegative":4633,"Ġsetting":4634,"Ġproduced":4635,"Ġmur":4636,"Ġconnection":4637,"ĠMer":4638,"Ġdriver":4639,"Ġexecutive":4640,"Ġassault":4641,"Ġborn":4642,"ĠVer":4643,"tained":4644,"Ġstructure":4645,"Ġreduce":4646,"Ġdecades":4647,"Ġded":4648,"uke":4649,"ĠMany":4650,"idden":4651,"Ġleague":4652,"Se":4653,"Ġjoin":4654,"Ġdisco":4655,"Ġdie":4656,"cks":4657,"actions":4658,"Ġassess":4659,"agn":4660,"Ġgoals":4661,"ours":4662,"IR":4663,"Ġsenior":4664,"iller":4665,"mod":4666,"ipment":4667,"ocol":4668,"uy":4669,"ĠQue":4670,"Ġparties":4671,"irgin":4672,"Ġlearning":4673,"itable":4674,"Ġstreet":4675,"Ġcamera":4676,"App":4677,"Ġskills":4678,"bre":4679,"cious":4680,"Ġcelebr":4681,"ĠFranc":4682,"Ġexisting":4683,"Ġwilling":4684,"lor":4685,"Ġid":4686,"ĠSpace":4687,"Ġcritical":4688,"ĠLa":4689,"ortunately":4690,"Ġserve":4691,"Ġcold":4692,"Ġspecies":4693,"TS":4694,"Ġanimals":4695,"ĠBay":4696,"Ġolder":4697,"ĠUnder":4698,"estic":4699,"ĠTre":4700,"Ġteacher":4701,"Ġprefer":4702,"vis":4703,"Ġthread":4704,"ĠMatt":4705,"Ġmanager":4706,"ãĥ»":4707,"Ġprofessional":4708,"ĠVol":4709,"Ġnotes":4710,"These":4711,"ula":4712,"Ġfresh":4713,"ented":4714,"uzz":4715,"edy":4716,"clusion":4717,"ĠRel":4718,"Ġdoubt":4719,"EO":4720,"Ġopened":4721,"ĠBit":4722,"Advertisement":4723,"Ġguess":4724,"ĠUN":4725,"Ġsequ":4726,"Ġexplain":4727,"otten":4728,"Ġattract":4729,"aks":4730,"Ġstring":4731,"Ġcontext":4732,"ossible":4733,"ĠRepublicans":4734,"Ġsolid":4735,"Ġcities":4736,"Ġasking":4737,"Ġrandom":4738,"ups":4739,"uries":4740,"arant":4741,"dden":4742,"gl":4743,"ĠFlorida":4744,"Ġdepend":4745,"ĠScott":4746,"Ġ33":4747,"ĠiT":4748,"icon":4749,"Ġmentioned":4750,"Ġ2000":4751,"Ġclaimed":4752,"Ġdefinitely":4753,"ulf":4754,"Ġcore":4755,"Ġopening":4756,"ĠConst":4757,"which":4758,"ĠTra":4759,"AG":4760,"72":4761,"Ġbelieved":4762,"ada":4763,"Ġ48":4764,"ĠSecurity":4765,"yright":4766,"ĠPet":4767,"ĠLou":4768,"Ġholding":4769,"================":4770,"Ġice":4771,"Ġbrow":4772,"Ġauthorities":4773,"host":4774,"word":4775,"Ġscore":4776,"ĠDiv":4777,"Ġcells":4778,"Ġtransl":4779,"Ġneighbor":4780,"Ġremove":4781,"uct":4782,"Ġdistrict":4783,"ĠAccording":4784,"Ġworse":4785,"Ġconcerns":4786,"Ġpresidential":4787,"Ġpolicies":4788,"ĠHall":4789,"73":4790,"Ġhus":4791,"AY":4792,"Ġ2006":4793,"ĠJud":4794,"Ġindependent":4795,"ĠJustice":4796,"iliar":4797,"print":4798,"ighter":4799,"Ġprotection":4800,"zen":4801,"Ġsudden":4802,"house":4803,"ĠJes":4804,"PR":4805,"ĠInf":4806,"Ġbul":4807,"Ġ_":4808,"ĠService":4809,"ĠPR":4810,"Ġstrategy":4811,"ffect":4812,"Ġgirls":4813,"Ġmissing":4814,"oyal":4815,"ĠTeam"
:4816,"ulated":4817,"Ġdat":4818,"Ġpolitics":4819,"abor":4820,"According":4821,"Ġspell":4822,"Ġgraph":4823,"orthern":4824,"TC":4825,"Ab":4826,"Ġlabor":4827,"isher":4828,"Ġkick":4829,"ĠiTunes":4830,"Ġsteps":4831,"poses":4832,"Ġsmaller":4833,"En":4834,"bert":4835,"Ġroll":4836,"Ġresearchers":4837,"Ġclosed":4838,"Ġtransport":4839,"Ġlawy":4840,"________________":4841,"ĠChicago":4842,"Ġaspect":4843,"Ġnone":4844,"Ġmarriage":4845,"96":4846,"Ġelements":4847,"ĠFre":4848,"ĠSal":4849,"Ġdram":4850,"FC":4851,"top":4852,"equ":4853,"Ġhearing":4854,"Ġsupported":4855,"Ġtesting":4856,"cohol":4857,"Ġmassive":4858,"Ġstick":4859,"Ġguard":4860,"isco":4861,"phone":4862,"From":4863,"However":4864,"Ġborder":4865,"Ġcopy":4866,"ography":4867,"list":4868,"71":4869,"Ġowner":4870,"class":4871,"ruit":4872,"rate":4873,"ĠOnce":4874,"Ġdigital":4875,"Ġtask":4876,"ERS":4877,"Ġincred":4878,"tes":4879,"++":4880,"ĠFrance":4881,"Ġbreat":4882,"owl":4883,"Ġissued":4884,"ĠWestern":4885,"Ġdetect":4886,"Ġpartners":4887,"Ġshared":4888,"ĠCall":4889,"Ġcancer":4890,"ache":4891,"ribe":4892,"Ġexplained":4893,"Ġheat":4894,"{\"":4895,"Ġinvestment":4896,"ĠBook":4897,"Ġwood":4898,"Ġtools":4899,"ĠAlthough":4900,"Ġbelief":4901,"Ġcrisis":4902,"Ġge":4903,"ĠMP":4904,"Ġoperation":4905,"type":4906,"~~":4907,"ga":4908,"Ġcontains":4909,"anta":4910,"Ġexpress":4911,"ĠGroup":4912,"ĠJournal":4913,"ka":4914,"Ġamb":4915,"ĠUSA":4916,"Ġfinding":4917,"Ġfunding":4918,"how":4919,"Ġestablished":4920,"ideos":4921,"Ġdegree":4922,"Ġdangerous":4923,"anging":4924,"Ġfreedom":4925,"pport":4926,"outhern":4927,"Ġchurch":4928,"Ġcatch":4929,"ĠTwo":4930,"Ġpresence":4931,"ĠGuard":4932,"Up":4933,"Ġauthority":4934,"ĠProject":4935,"Ġbutton":4936,"Ġconsequ":4937,"Ġvalid":4938,"Ġweak":4939,"Ġstarts":4940,"Ġreference":4941,"ĠMem":4942,"\")":4943,"UN":4944,"orage":4945,"ĠOpen":4946,"Ġcollection":4947,"ym":4948,"gency":4949,"Ġbeautiful":4950,"ros":4951,"Ġtells":4952,"Ġwaiting":4953,"nel":4954,"Ġproviding":4955,"ĠDemocrats":4956,"Ġdaughter":4957,"Ġmaster":4958,"Ġpurposes":4959,"ĠJapanese":4960,"Ġequal":4961,"Ġturns":4962,"Ġdocuments":4963,"Ġwatching":4964,"Res":4965,"Ġran":4966,"2014":4967,"Ġreject":4968,"ĠKorea":4969,"Ġvictims":4970,"Level":4971,"erences":4972,"Ġwitness":4973,"Ġ34":4974,"Ġreform":4975,"coming":4976,"Ġoccup":4977,"Ġcaught":4978,"Ġtraffic":4979,"ading":4980,"Ġmodels":4981,"ario":4982,"Ġserved":4983,"Ġbatter":4984,"uate":4985,"ĠSecretary":4986,"Ġagreed":4987,"Ġtruly":4988,"ynam":4989,"ĠRet":4990,"Ġunits":4991,"ĠResearch":4992,"hand":4993,"azine":4994,"ĠMike":4995,"Ġvariety":4996,"otal":4997,"Ġamazing":4998,"Ġconfirmed":4999,"Ġentirely":5000,"Ġpurchase":5001,"Ġelement":5002,"Ġcash":5003,"Ġdetermine":5004,"De":5005,"Ġcars":5006,"ĠWall":5007,"âĸ":5008,"Ġviews":5009,"Ġdrugs":5010,"Ġdepartment":5011,"ĠStep":5012,"uit":5013,"Ġ39":5014,"asure":5015,"ĠClass":5016,"Ġcovered":5017,"ĠBank":5018,"Ġmere":5019,"uana":5020,"Ġmulti":5021,"Ġmix":5022,"Ġunlike":5023,"levision":5024,"Ġstopped":5025,"Ġsem":5026,"ĠGal":5027,"ules":5028,"Ġwel":5029,"ĠJohnson":5030,"la":5031,"Ġskill":5032,"Ġbecoming":5033,"rie":5034,"Ġappropriate":5035,"fe":5036,"ellow":5037,"ĠProt":5038,"ulate":5039,"ocation":5040,"Ġweekend":5041,"odies":5042,"Ġsites":5043,"Ġanimal":5044,"ĠTim":5045,"Ġscale":5046,"Ġcharged":5047,"Ġinstruct":5048,"illa":5049,"Ġmethods":5050,"Ġcert":5051,"Ġjudge":5052,"ĠHel":5053,"Ġdollars":5054,"Ġstanding":5055,"ĠSqu":5056,"Ġdebt":5057,"liam":5058,"Ġdriving":5059,"ĠSum":5060,"ĠEdition":5061,"Ġalbum":5062,"andon":5063,"IF":5064,"ĠUk":5065,"63":5066,"ader":5067,"Ġcommercial":5068,"esh":5069,"ĠGover
nment":5070,"Ġdiscovered":5071,"Ġoutput":5072,"ĠHillary":5073,"ĠCarol":5074,"Ġ2005":5075,"Ġabuse":5076,"ancing":5077,"Ġswitch":5078,"Ġannual":5079,"Tw":5080,"Ġstated":5081,"agement":5082,"inner":5083,"Ġdemocr":5084,"Ġresidents":5085,"Ġallowing":5086,"Ġfactors":5087,"odd":5088,"Ġfuck":5089,"emies":5090,"Ġoccurred":5091,"oti":5092,"Ġnorth":5093,"ĠPublic":5094,"Ġinjury":5095,"Ġinsurance":5096,"CL":5097,"olly":5098,"ãĢ":5099,"Ġrepeated":5100,"Ġarms":5101,"anged":5102,"Ġconstruction":5103,"Ġfle":5104,"PU":5105,"icians":5106,"Ġforms":5107,"ĠMcC":5108,"antic":5109,"Ġmental":5110,"pire":5111,"Ġequipment":5112,"Ġfant":5113,"Ġdiscussion":5114,"Ġregarding":5115,"kin":5116,"arp":5117,"Ġchair":5118,"ogue":5119,"Ġproceed":5120,"ĠId":5121,"Our":5122,"Ġmurder":5123,"Man":5124,"Ġ49":5125,"asp":5126,"Ġsupply":5127,"Ġinput":5128,"Ġwealth":5129,"liament":5130,"Ġproced":5131,"orial":5132,"ĠStat":5133,"ĠNFL":5134,"hens":5135,"ĠInstitute":5136,"Ġputting":5137,"ournament":5138,"etic":5139,"Ġlocated":5140,"Ġkid":5141,"eria":5142,"run":5143,"Ġprinc":5144,"Ġ!":5145,"going":5146,"ĠBet":5147,"Ġclot":5148,"Ġtelling":5149,"Ġproposed":5150,"iot":5151,"orry":5152,"Ġfunds":5153,"gment":5154,"ĠLife":5155,"Ġbaby":5156,"ĠBack":5157,"Ġspoke":5158,"Image":5159,"Ġearn":5160,"ĠAT":5161,"gu":5162,"Ġexchange":5163,"ĠLin":5164,"oving":5165,"Ġpair":5166,"More":5167,"azon":5168,"Ġarrested":5169,"Ġkilling":5170,"can":5171,"ĠCard":5172,"yd":5173,"Ġidentified":5174,"Ġmobile":5175,"Ġthanks":5176,"onym":5177,"ĠForm":5178,"Ġhundreds":5179,"ĠChris":5180,"ĠCat":5181,"Ġtrend":5182,"hat":5183,"ĠAv":5184,"oman":5185,"Ġelectric":5186,"ĠWil":5187,"SE":5188,"Of":5189,"Ġrestaur":5190,"oted":5191,"Ġtrig":5192,"Ġnine":5193,"Ġbomb":5194,"Why":5195,"¯":5196,"Ġcoverage":5197,"Ġappeal":5198,"ĠRobert":5199,"ĠSup":5200,"Ġfinished":5201,"Ġflow":5202,"Ġdeliver":5203,"Ġcalcul":5204,"Ġphotos":5205,"Ġphil":5206,"Ġpieces":5207,"Ġappre":5208,"kes":5209,"Ġrough":5210,"Do":5211,"Ġpartner":5212,"Ġconcerned":5213,"Ġ37":5214,"ĠGen":5215,"Col":5216,"ctors":5217,"Ġ=>":5218,"state":5219,"Ġsuggested":5220,"ĠForce":5221,"CE":5222,"Ġherself":5223,"ĠPlan":5224,"works":5225,"ooth":5226,"rency":5227,"Ġcorner":5228,"Ġhusband":5229,"Ġinternet":5230,"ĠAut":5231,"ems":5232,"osen":5233,"ĠAtl":5234,"gen":5235,"Ġbalance":5236,"62":5237,"Ġsounds":5238,"text":5239,"Ġarr":5240,"oves":5241,"Ġmillions":5242,"Ġradio":5243,"Ġsatisf":5244,"ĠDam":5245,"Mr":5246,"Go":5247,"Spe":5248,"Ġcombat":5249,"rant":5250,"ĠGree":5251,"Ġfuel":5252,"Ġdistance":5253,"Ġtests":5254,"Ġdecre":5255,"ĠEr":5256,"Ġmanaged":5257,"DS":5258,"Ġtit":5259,"Ġmeasures":5260,"ĠLiber":5261,"Ġattend":5262,"ashed":5263,"ĠJose":5264,"ĠNight":5265,"dit":5266,"ĠNov":5267,"ĠEnd":5268,"outs":5269,"Ġgeneration":5270,"Ġadvoc":5271,"yth":5272,"Ġconversation":5273,"ĠSky":5274,"active":5275,"cel":5276,"rier":5277,"ĠFrank":5278,"Ġgender":5279,"Ġconcent":5280,"Ġcarried":5281,"anda":5282,"ĠVirgin":5283,"Ġarrived":5284,"icide":5285,"aded":5286,"Ġfailure":5287,"Ġminimum":5288,"lets":5289,"Ġworst":5290,"Ġkeeping":5291,"Ġintended":5292,"Ġillegal":5293,"Ġsubsc":5294,"Ġdetermined":5295,"Ġtrip":5296,"Yes":5297,"Ġraise":5298,"Ġ~":5299,"Ġfeels":5300,"Ġpackage":5301,"ĠJo":5302,"hi":5303,"2016":5304,"real":5305,"Ġfra":5306,"Ġsymb":5307,"Me":5308,"ucky":5309,"pret":5310,"ĠKh":5311,"ĠEdit":5312,"ĠWeb":5313,"emic":5314,"ĠColor":5315,"Ġjustice":5316,"Int":5317,"Ġfarm":5318,"cknow":5319,"\">":5320,"eless":5321,"Ġreduced":5322,"Ġ500":5323,"xx":5324,"ĠRad":5325,"ĠWood":5326,"Ġclin":5327,"Ġhyp":5328,"iler":5329,"ura":5330,"kins":5331,"85":5332,"61":5333,"ĠTheir
":5334,"ĠMary":5335,"Ġsan":5336,"Ġnovel":5337,"ĠWho":5338,"Ġcapacity":5339,"Ġimpossible":5340,"Ġplays":5341,"Ġminister":5342,"ijuana":5343,"icate":5344,"ĠSet":5345,"Ġfram":5346,"Ġing":5347,"Ġcommunities":5348,"ĠFBI":5349,"ita":5350,"Ġbon":5351,"Ġstrateg":5352,"Ġinterests":5353,"lock":5354,"gers":5355,"mas":5356,"ĠAND":5357,"Ġconflict":5358,"Ġrequirements":5359,"Ġsac":5360,"Ġoperating":5361,"ini":5362,"related":5363,"Ġcommitted":5364,"Ġrelatively":5365,"Ġsouth":5366,"¯¯":5367,"Ġafford":5368,"Ġidentity":5369,"Ġdecisions":5370,"Ġaccused":5371,"place":5372,"Ġvictory":5373,"och":5374,"iat":5375,"Name":5376,"Com":5377,"tion":5378,"eds":5379,"Ġseek":5380,"Ġtight":5381,"ĠImages":5382,"Ġiniti":5383,"Ġhumans":5384,"Ġfamiliar":5385,"Ġaudience":5386,"Ġinternal":5387,"venture":5388,"Ġsides":5389,"ĠTO":5390,"Ġdim":5391,"Ġconclud":5392,"Ġappoint":5393,"Ġenforcement":5394,"ĠJim":5395,"ĠAssociation":5396,"Ġcircumst":5397,"ĠCanadian":5398,"Ġjoined":5399,"Ġdifferences":5400,"ĠLos":5401,"Ġprotest":5402,"Ġtwice":5403,"win":5404,"Ġglass":5405,"arsh":5406,"ĠArmy":5407,"Ġexpression":5408,"Ġdecide":5409,"Ġplanning":5410,"ania":5411,"Ġhandle":5412,"ĠMicrosoft":5413,"ĠNor":5414,"Ġmaximum":5415,"ĠRev":5416,"Ġsea":5417,"Ġeval":5418,"Ġhelps":5419,"ref":5420,"Ġbound":5421,"Ġmouth":5422,"Ġstandards":5423,"Ġclim":5424,"ĠCamp":5425,"ĠFox":5426,"cles":5427,"Ġarmy":5428,"ĠTechn":5429,"acking":5430,"xy":5431,"SS":5432,"Ġ42":5433,"Ġbug":5434,"ĠUkrain":5435,"ĠMax":5436,"ĠJones":5437,"ĠShow":5438,"lo":5439,"Ġplanet":5440,"Ġ75":5441,"Ġwinning":5442,"Ġfaster":5443,"Ġspect":5444,"Ġbroken":5445,"TR":5446,"Ġdefined":5447,"Ġhealthy":5448,"Ġcompetition":5449,"https":5450,"ĠIsland":5451,"ĠFe":5452,"Ġannounce":5453,"ĠCup":5454,"ĠInstead":5455,"Ġclient":5456,"Ġpossibly":5457,"section":5458,"ocket":5459,"look":5460,"Ġfinish":5461,"Ġcrew":5462,"Ġreserv":5463,"Ġeditor":5464,"Ġhate":5465,"Ġsale":5466,"Ġcontrovers":5467,"Ġpages":5468,"wing":5469,"Ġnumer":5470,"Ġopposition":5471,"Ġ2004":5472,"Ġrefuge":5473,"Ġflight":5474,"Ġapart":5475,"ĠLat":5476,"Americ":5477,"ĠAfrica":5478,"Ġapplications":5479,"ĠPalest":5480,"ĠBur":5481,"Ġgar":5482,"ĠSocial":5483,"Ġupgr":5484,"Ġshape":5485,"Ġspeaking":5486,"ansion":5487,"ao":5488,"ĠSn":5489,"Ġworry":5490,"ĠBritain":5491,"Please":5492,"roud":5493,"Ġhun":5494,"Ġintroduced":5495,"Ġdiet":5496,"Ind":5497,"ĠSecond":5498,"Ġfunctions":5499,"uts":5500,"ĠEach":5501,"ĠJeff":5502,"Ġstress":5503,"Ġaccounts":5504,"Ġguarant":5505,"ĠAnn":5506,"edia":5507,"Ġhonest":5508,"Ġtree":5509,"ĠAfrican":5510,"ĠBush":5511,"},":5512,"Ġsch":5513,"ĠOnly":5514,"Ġfif":5515,"igan":5516,"Ġexercise":5517,"ĠExp":5518,"Ġscientists":5519,"Ġlegislation":5520,"ĠWork":5521,"ĠSpr":5522,"ÃĤ":5523,"ĠHuman":5524,"Ġè":5525,"Ġsurvey":5526,"Ġrich":5527,"rip":5528,"Ġmaintain":5529,"Ġflo":5530,"Ġleadership":5531,"stream":5532,"ĠIslamic":5533,"Ġ01":5534,"ĠCollege":5535,"Ġmagic":5536,"ĠPrime":5537,"Ġfigures":5538,"2017":5539,"inder":5540,"xual":5541,"ĠDead":5542,"Ġabsolutely":5543,"Ġfourth":5544,"Ġpresented":5545,"respond":5546,"rible":5547,"Ġalcohol":5548,"ato":5549,"ĠDE":5550,"porary":5551,"Ġgrab":5552,"Ġvari":5553,"Ġquant":5554,"ĠPhoto":5555,"Ġplus":5556,"rick":5557,"arks":5558,"Ġalternative":5559,"Ġpil":5560,"Ġapprox":5561,"that":5562,"Ġobjects":5563,"ĠRo":5564,"ĠAndroid":5565,"Ġsignificantly":5566,"ĠRoad":5567,"kay":5568,"Read":5569,"avor":5570,"Ġacknow":5571,"ĠHD":5572,"ĠSing":5573,"Or":5574,"ĠMont":5575,"Ġuns":5576,"prof":5577,"Ġnegoti":5578,"ĠArch":5579,"iki":5580,"Ġtelevision":5581,"ĠJewish":5582,"Ġcommittee":5583,"Ġmotor":5584,"Ġappearance":5585,"Ġsitt
ing":5586,"Ġstrike":5587,"ĠDown":5588,"comp":5589,"ĠHist":5590,"Ġfold":5591,"acement":5592,"ĠLouis":5593,"Ġbelong":5594,"ĠâĢ¢":5595,"Ġmort":5596,"Ġprepared":5597,"Ġ64":5598,"ĠMaster":5599,"Ġindeed":5600,"ĠDen":5601,"Ġrent":5602,"TA":5603,"ourney":5604,"arc":5605,"Su":5606,"97":5607,"Ġadvice":5608,"Ġchanging":5609,"Ġlisted":5610,"Ġlaunched":5611,"isation":5612,"ĠPeter":5613,"ishes":5614,"Ġlived":5615,"ĠMel":5616,"ĠSupreme":5617,"ĠFederal":5618,"Ġ);":5619,"ructure":5620,"Ġsets":5621,"Ġphilos":5622,"uous":5623,"ĠÂł":5624,"Ġapplied":5625,"ĠNOT":5626,"Ġhousing":5627,"ĠMount":5628,"Ġodd":5629,"Ġsust":5630,"DA":5631,"fficient":5632,"Ġ?":5633,"olved":5634,"Ġpowers":5635,"Ġthr":5636,"Ġremaining":5637,"ĠWater":5638,"LC":5639,"Ġcauses":5640,"ãģ®":5641,"Ġmanner":5642,"ads":5643,"Ġsuggests":5644,"Ġends":5645,"standing":5646,"fig":5647,"ĠDun":5648,"idth":5649,"Ġgay":5650,"Ġtermin":5651,"ĠAngeles":5652,"MS":5653,"Ġscientific":5654,"Ġcoal":5655,"apers":5656,"bar":5657,"ĠThomas":5658,"Ġsym":5659,"ĠRun":5660,"this":5661,"PC":5662,"igrants":5663,"Ġminute":5664,"ĠDistrict":5665,"cellent":5666,"Ġleaves":5667,"Ġcompleted":5668,"amin":5669,"Ġfocused":5670,"Ġmonitor":5671,"Ġvehicles":5672,"MA":5673,"ĠMass":5674,"ĠGrand":5675,"Ġaffected":5676,"itutional":5677,"Ġconstruct":5678,"Ġfollows":5679,"Ġton":5680,"reens":5681,"Ġhomes":5682,"ĠExt":5683,"ĠLevel":5684,"rast":5685,"ĠIr":5686,"Ġelim":5687,"Ġlargely":5688,"ĠJoe":5689,"Ġvotes":5690,"alls":5691,"Ġbusinesses":5692,"ĠFoundation":5693,"ĠCentral":5694,"Ġyards":5695,"Ġmaterials":5696,"ulner":5697,"Ġguide":5698,"Ġcloser":5699,"ums":5700,"Ġsports":5701,"eder":5702,"Just":5703,"Ġtaxes":5704,"84":5705,"ĠOld":5706,"Ġdecade":5707,"ola":5708,"Ġvir":5709,"Ġdropped":5710,"Ġdelay":5711,"itect":5712,"Ġsecure":5713,"stein":5714,"level":5715,"Ġtreated":5716,"Ġfiled":5717,"aine":5718,"Ġvan":5719,"Ġmir":5720,"Ġcolumn":5721,"icted":5722,"eper":5723,"Ġrot":5724,"Ġconsult":5725,"Ġentry":5726,"Ġmarijuana":5727,"ĠDou":5728,"Ġapparently":5729,"oking":5730,"clusive":5731,"Ġincreases":5732,"ano":5733,"Ġspecifically":5734,"Ġtele":5735,"ensions":5736,"Ġreligion":5737,"abilities":5738,"Ġframe":5739,"ĠNote":5740,"ĠLee":5741,"Ġhelping":5742,"Ġedge":5743,"oston":5744,"Ġorganizations":5745,"Ãĥ":5746,"ĠBoth":5747,"hips":5748,"Ġbigger":5749,"Ġboost":5750,"ĠStand":5751,"Ġrow":5752,"uls":5753,"abase":5754,"Ġrid":5755,"Let":5756,"aren":5757,"rave":5758,"Ġstret":5759,"PD":5760,"Ġvision":5761,"Ġwearing":5762,"Ġappreci":5763,"Ġaward":5764,"ĠUse":5765,"Ġfactor":5766,"war":5767,"ulations":5768,")(":5769,"Ġgod":5770,"Ġterrit":5771,"Ġparam":5772,"asts":5773,"87":5774,"Ġenemies":5775,"ĠGames":5776,"FF":5777,"Ġaccident":5778,"Well":5779,"ĠMartin":5780,"TER":5781,"Ġath":5782,"ĠHell":5783,"Ġforg":5784,"Ġveter":5785,"ĠMedic":5786,"free":5787,"Ġstars":5788,"Ġexpensive":5789,"Ġacad":5790,"rawn":5791,"ĠWhe":5792,"Ġlock":5793,"Ġformat":5794,"Ġsoldiers":5795,"sm":5796,"Ġagent":5797,"Ġresponsibility":5798,"ora":5799,"ĠScience":5800,"Ġrapid":5801,"Ġtough":5802,"ĠJesus":5803,"Ġbelieves":5804,"ML":5805,"Ġwear":5806,"lete":5807,"ÃĥÃĤ":5808,"ĠDri":5809,"Ġcommission":5810,"ĠBob":5811,"Oh":5812,"aped":5813,"Ġwarm":5814,"ÃĥÃĤÃĥÃĤ":5815,"Ġ2003":5816,"ortion":5817,"Ġhasn":5818,"uster":5819,"Ġunivers":5820,"ĠIll":5821,"Ġking":5822,"ologies":5823,"94":5824,"ĠTem":5825,"ĠMos":5826,"Ġpatient":5827,"ĠMexico":5828,"cean":5829,"ĠDeath":5830,"ĠSanders":5831,"you":5832,"ĠCast":5833,"ĠCompany":5834,"pty":5835,"Ġhappening":5836,"FP":5837,"ĠBattle":5838,"Ġbought":5839,"Am":5840,"Mod":5841,"Us":5842,"uters":5843,"ĠCre":5844,"ĠThose":5845,"Ġ44":
5846,"iser":5847,"Ġsoul":5848,"ĠTop":5849,"ĠHarry":5850,"ĠAw":5851,"Ġseat":5852,"ffee":5853,"Ġrevolution":5854,"Ġ(\"":5855,"ĠDuring":5856,"ette":5857,"Ġring":5858,"Ġoffensive":5859,"Ġreturns":5860,"Ġvideos":5861,"Ġdiscl":5862,"Ġfamous":5863,"enced":5864,"ĠSign":5865,"ĠRiver":5866,"Ġ300":5867,"PM":5868,"ĠBus":5869,"ĠCH":5870,"Ġcandidates":5871,"arden":5872,"Ġpercentage":5873,"Ġvisual":5874,"Ġthank":5875,"Ġtrouble":5876,"nergy":5877,"Ġ2001":5878,"Ġprove":5879,"ashion":5880,"Ġenh":5881,"ĠLong":5882,"UM":5883,"Ġconnected":5884,"Ġpossibility":5885,"Over":5886,"Ġexpert":5887,"Ġlibrary":5888,"arts":5889,"ĠDirector":5890,"Ġfellow":5891,"92":5892,"irty":5893,"Ġdry":5894,"Ġsigns":5895,"ĠLove":5896,"Ġquiet":5897,"foot":5898,"Ġpure":5899,"ĠHun":5900,"Ġfilled":5901,"phas":5902,"ĠElect":5903,"endment":5904,"ĠExpl":5905,"Ġunable":5906,"ns":5907,"mo":5908,"Ġvast":5909,"obe":5910,"Ġidentify":5911,"apping":5912,"ĠCarolina":5913,"gress":5914,"Ġprote":5915,"Ġfish":5916,"Ġcircumstances":5917,"razy":5918,"ĠPhot":5919,"Ġbodies":5920,"ĠMur":5921,"Ġdeveloping":5922,"ĠAR":5923,"Ġexperienced":5924,"Ġsubstant":5925,"ĠBoard":5926,"esome":5927,"Ġdomestic":5928,"Ġcombined":5929,"ĠPut":5930,"Ġchemical":5931,"ĠChild":5932,"Ġpool":5933,"ĠCy":5934,"Ġegg":5935,"cons":5936,"sters":5937,"Ġhurt":5938,"Ġmarkets":5939,"Ġconservative":5940,"Ġsupporters":5941,"Ġagencies":5942,"idel":5943,"Ob":5944,"urb":5945,"Ġ43":5946,"ĠDefense":5947,"ye":5948,"ĠAp":5949,"dule":5950,"Ġtemperature":5951,"Ġconducted":5952,"ĠChief":5953,"Ġpulled":5954,"Ġfol":5955,"Last":5956,"onto":5957,"osis":5958,"VER":5959,"Des":5960,"ĠPan":5961,"First":5962,"Ġadvance":5963,"Ġlicense":5964,"rors":5965,"ĠJon":5966,"Ġimagine":5967,"Ġhell":5968,"Ġfixed":5969,"Ġincor":5970,"osite":5971,"ĠLog":5972,"icken":5973,"]:":5974,"Ġsurprise":5975,"hab":5976,"Ġcraft":5977,"olt":5978,"ĠJul":5979,"Ġdial":5980,"Ġrelevant":5981,"Ġentered":5982,"Ġleads":5983,"ĠAD":5984,"ĠClean":5985,"Ġpictures":5986,"essor":5987,"Ġalt":5988,"Ġpaying":5989,"Per":5990,"ĠMarket":5991,"Ġupdates":5992,"amily":5993,"ĠType":5994,"ĠHome":5995,"Ġ55":5996,"sembly":5997,"rome":5998,"83":5999,"Ġgreatest":6000,"Ġheight":6001,"Ġheav":6002,"aints":6003,"Ġlisten":6004,"aser":6005,"ĠSH":6006,"Ġcapable":6007,"acle":6008,"Ġperspect":6009,"inating":6010,"Ġoffering":6011,"rypt":6012,"ĠDevelop":6013,"abin":6014,"rc":6015,"Ġbright":6016,"alty":6017,"arrow":6018,"Ġsuppl":6019,"inding":6020,"acked":6021,"gypt":6022,"ĠAnother":6023,"pg":6024,"ĠVirginia":6025,"ĠLu":6026,"Ġplanned":6027,"Ġpit":6028,"Ġsweet":6029,"Type":6030,"ĠDi":6031,"Ġtypically":6032,"ĠFrancisco":6033,"Ġprospect":6034,"ĠDan":6035,"Ġteen":6036,"rees":6037,"Ġsched":6038,"Ġhol":6039,"Ġscr":6040,"Ġlots":6041,"life":6042,"Ġnewsp":6043,"Ġforget":6044,"ĠNone":6045,"ĠMiddle":6046,"ĠRyan":6047,"edd":6048,"Ġsevere":6049,"Ġsuit":6050,"ller":6051,"93":6052,"Ġcorrespond":6053,"Ġexplos":6054,"uations":6055,"Ġflag":6056,"game":6057,"rid":6058,"Ġprin":6059,"ĠData":6060,"Ġdeploy":6061,"ĠEnter":6062,"suit":6063,"ghan":6064,"ĠMen":6065,"Ġthoughts":6066,"Ġmatters":6067,"Ġadapt":6068,"ĠAri":6069,"Ġfill":6070,"Ġforth":6071,"Ġsam":6072,"Ġ41":6073,"Ġpayment":6074,"ĠHor":6075,"Ġspring":6076,"duc":6077,"Ġlosing":6078,"Ġbringing":6079,"FO":6080,"ala":6081,"Ġdistribution":6082,"hered":6083,"bour":6084,"ĠIsraeli":6085,"oma":6086,"Ġcombination":6087,"Ġplenty":6088,"VE":6089,"Can":6090,"ĠHaw":6091,"Ġperman":6092,"ĠSpecial":6093,"Ġtow":6094,"Ġseeking":6095,"Ġexamples":6096,"Ġclasses":6097,"cr":6098,"Ġbeer":6099,"Ġmoves":6100,"ĠIP":6101,"ĠKn":6102,"Ġpanel":6103,"Even":6104,"Ġproperly":6105,"Ġ
ris":6106,"Ġplug":6107,"Ġestimated":6108,"Every":6109,"Ġdefensive":6110,"agraph":6111,"Ġpregn":6112,"Ġinstit":6113,"ĠVict":6114,"Ġvolume":6115,"Ġpositions":6116,"Ġlinks":6117,"ĠProgram":6118,"ĠWeek":6119,"agues":6120,"Ġtransform":6121,"ker":6122,"ĠCEO":6123,"Ġcas":6124,"Ġopponent":6125,"Ġtweet":6126,"ĠCode":6127,"Ġshop":6128,"Ġfly":6129,"Ġtalks":6130,"Ġbag":6131,"Phone":6132,"Ġaid":6133,"Ġplants":6134,"Ġ65":6135,"Ġattorney":6136,"arters":6137,"quest":6138,"ĠMagic":6139,"Ġbegins":6140,"Ġmyster":6141,"Ġenvironmental":6142,"Ġstorage":6143,"NN":6144,"Ġmarg":6145,"Ġske":6146,"Ġmetal":6147,"elly":6148,"Ġordered":6149,"Ġremained":6150,"Ġloved":6151,"Ġprompt":6152,"Ġupdated":6153,"Ġexperts":6154,"Ġwalking":6155,"Ġancient":6156,"Ġperformed":6157,"ATE":6158,"Ġneither":6159,"iency":6160,"Ġmanufacture":6161,"ĠPak":6162,"Ġselected":6163,"Ġmine":6164,"Ġultimately":6165,"Ġexplan":6166,"Ġlabel":6167,"ĠServices":6168,"ributed":6169,"Trump":6170,"Ġsyn":6171,"ĠUlt":6172,"SC":6173,"Ġmeat":6174,"Ġgiant":6175,"ĠWars":6176,"ĠON":6177,"Ġadm":6178,"Ġinterpret":6179,"Ġevening":6180,"Ġevil":6181,"ĠBoston":6182,"ĠWild":6183,"ĠÃ":6184,"ĠBitcoin":6185,"ĠAmazon":6186,"Dr":6187,"ĠInformation":6188,"Ġobviously":6189,"Ġadvanced":6190,"Photo":6191,"olar":6192,"Ġweather":6193,"Ġsymbol":6194,"Ġsole":6195,"Ġpotentially":6196,"oster":6197,"Ġoriginally":6198,"mun":6199,"300":6200,"aze":6201,"essions":6202,"Ġdeck":6203,"Ġstood":6204,"Ġyouth":6205,"ĠBern":6206,"Rep":6207,"ĠTest":6208,"Ġbasically":6209,"otic":6210,"Ġinvolve":6211,"olit":6212,"lyn":6213,"See":6214,"Ġaircraft":6215,"Ġconfirm":6216,"EW":6217,"Ġmessages":6218,"ĠRichard":6219,"Ġkit":6220,"Ġprohib":6221,"Ġvulner":6222,"isters":6223,"Ġexistence":6224,"Ġturning":6225,"ĠSP":6226,"Ġdesire":6227,"Ġflat":6228,"Ġment":6229,"season":6230,"anges":6231,"Ġneighborhood":6232,"ĠLake":6233,"ATION":6234,"Ġpointed":6235,"bur":6236,"Ġinnov":6237,"ucks":6238,"UL":6239,"Ġprofessor":6240,"Ġexpressed":6241,"AB":6242,"icious":6243,"Ġ2002":6244,"ĠDev":6245,"Ġsession":6246,"Ġbare":6247,"sen":6248,"Ġdiss":6249,"ĠCath":6250,"ĠPass":6251,"ĠPoint":6252,"Ġdoctor":6253,"orrow":6254,"ailed":6255,"ĠRub":6256,"ĠDC":6257,"ĠCharl":6258,"person":6259,"Ġwriter":6260,"ighters":6261,"ureau":6262,"Ġoblig":6263,"Ġrecorded":6264,"Ġbroke":6265,"Ġorders":6266,"ilty":6267,"Ġmotion":6268,"inity":6269,"law":6270,"adium":6271,"Ġimmigration":6272,"Ġcontrast":6273,"Ġbatt":6274,"Ġexcellent":6275,"Ġtechnical":6276,"ami":6277,"Ġtun":6278,"Ġcloud":6279,"ĠYear":6280,"geon":6281,"Ġcreation":6282,"Ġstrange":6283,"Ġauth":6284,"Ġfort":6285,"born":6286,"Ġextent":6287,"ĠToday":6288,"ĠClub":6289,"Ġrain":6290,"Ġsample":6291,"Ġaccepted":6292,"Ġtact":6293,"Ġfired":6294,"ĠSon":6295,"Ġstands":6296,"Ġboot":6297,"Ġ47":6298,"Ġstatements":6299,"Ġversions":6300,"Ġselling":6301,"ounded":6302,"Ġ1990":6303,"Ġweren":6304,"ĠWatch":6305,"Ġexperiment":6306,"Post":6307,"Ġretail":6308,"uled":6309,"Inst":6310,"unte":6311,"ãĥ¼":6312,"Ġdepart":6313,"Ġbond":6314,"ivery":6315,"ompl":6316,"Ġreaction":6317,"ĠSyrian":6318,"ĠPac":6319,"apped":6320,"aniel":6321,"DP":6322,"Ġresolution":6323,"Ġreact":6324,"Ġapproved":6325,"onom":6326,"mond":6327,"ĠOffic":6328,"---":6329,"Ġreplace":6330,"Ġtack":6331,"Ġsport":6332,"Ġchain":6333,"Ġemergency":6334,"rad":6335,"ĠPalestin":6336,"Ġ46":6337,"Ġautomatically":6338,"Ġroute":6339,"Ġpal":6340,"Ġbanks":6341,"ĠParis":6342,"ĠMedia":6343,"road":6344,"icing":6345,"ixt":6346,"isted":6347,"Ġgrew":6348,"Ġcoord":6349,"ĠWhere":6350,"omin":6351,"Ġsubs":6352,"��":6353,"Ġ±":6354,"Ġcorporate":6355,"Ġselection":6356,"noon":6357,"ĠReport":635
8,"cs":6359,"cluding":6360,"orders":6361,"anche":6362,"ĠIts":6363,"Ġslowly":6364,"ĠEgypt":6365,"ĠAcc":6366,"Ġcolle":6367,"iques":6368,"EX":6369,"Ġattempts":6370,"url":6371,"ĠCross":6372,"Ġfindings":6373,"ĠSC":6374,"ĠOR":6375,"Ġindex":6376,"ensity":6377,"ĠWay":6378,"ĠLand":6379,"Ġshock":6380,"dis":6381,"Ġdynam":6382,"Ġcart":6383,"mosp":6384,"Since":6385,"iest":6386,"ĠBoy":6387,"Ġstorm":6388,"ĠContin":6389,"2013":6390,"hew":6391,"ilit":6392,"Ġessential":6393,"iquid":6394,"Other":6395,"ivered":6396,"Ġreasonable":6397,"Act":6398,"Ġsubsequ":6399,"ĠPack":6400,"ĠFort":6401,"Ġconsidering":6402,"Ġuniversity":6403,"log":6404,"Ġmarried":6405,"Ġillust":6406,"ĠTrue":6407,"£ı":6408,"Ġnumerous":6409,"rastructure":6410,"Ġseriously":6411,"Ġreferred":6412,"ua":6413,"Ġconsistent":6414,"onna":6415,"ĠReal":6416,"ruption":6417,"ciples":6418,"Ġfacts":6419,"91":6420,"otes":6421,"erg":6422,"Then":6423,"Ġaccompl":6424,"Note":6425,"Ġrevenue":6426,"Ġpassing":6427,"Ġmal":6428,"een":6429,"ĠYet":6430,"Ġgather":6431,"terday":6432,"ework":6433,"ĠAuthor":6434,"Pe":6435,"Ġoptim":6436,"Ġrub":6437,"Ġè£ı":6438,"Ġunknown":6439,"stone":6440,"Ġunion":6441,"olve":6442,"Ġopportunities":6443,"Ġbrowser":6444,"ĠWal":6445,"ĠCost":6446,"Ġreporting":6447,"sts":6448,"pet":6449,"Ġsand":6450,"Ġsuddenly":6451,"Ġsurprising":6452,"ĠVR":6453,"Ġsomewhat":6454,"ĠBas":6455,"ulture":6456,"izz":6457,"ĠCD":6458,"Ġchallenges":6459,"Ġsettings":6460,"Ġexperiences":6461,"ĠFull":6462,"Ġcann":6463,"Ġreceiving":6464,"EST":6465,"Ġjoint":6466,"Ġcultural":6467,"Ġast":6468,"82":6469,"astern":6470,"ceived":6471,"ĠCru":6472,"Ġbull":6473,"pired":6474,"amm":6475,"Ġfacing":6476,"power":6477,"Ġboss":6478,"ĠHol":6479,"Ġinstr":6480,"Ġincreasingly":6481,"Ġshift":6482,"Ġstreets":6483,"ĠWilliams":6484,"abb":6485,"Ġlie":6486,"Ġlaugh":6487,"ĠCa":6488,"PL":6489,"Ġadults":6490,"Ġcustomer":6491,"Ġobtained":6492,"Ġsupporting":6493,"html":6494,"fire":6495,"Ġdetailed":6496,"Ġpicked":6497,"ĠRight":6498,"lder":6499,"EE":6500,"stood":6501,"ĠKim":6502,"Ġwire":6503,"Ġsight":6504,"Ġdevelopers":6505,"Ġpersons":6506,"Ġsad":6507,"Ġcup":6508,"Ġwarning":6509,"Ġboys":6510,"long":6511,"Ġbird":6512,"fo":6513,"Ġwal":6514,"Ġobserved":6515,"Ġzone":6516,"iveness":6517,"Ġchannel":6518,"cript":6519,"Ġrefused":6520,"ĠAgain":6521,"Ġsuc":6522,"Ġspokesman":6523,"ĠRef":6524,"rite":6525,"ouston":6526,"ãĥ³":6527,"ĠSher":6528,"Ġacts":6529,"ĠName":6530,"Ġstruggle":6531,"arry":6532,"ometimes":6533,"Ġdiscrim":6534,"HT":6535,"Ġcategory":6536,"Ġrealize":6537,"Ġemployee":6538,"ĠAfghan":6539,"enger":6540,"Ġguns":6541,"ĠSteve":6542,"ĠMot":6543,"ĠOl":6544,"oked":6545,"Ġthick":6546,"Ġfairly":6547,"illy":6548,"Ġsurve":6549,"ĠMat":6550,"weight":6551,"âĶ":6552,"Ġtroops":6553,"Ġagents":6554,"Ġbattery":6555,"Ġmotiv":6556,"á":6557,"Sec":6558,"den":6559,"overy":6560,"LS":6561,"Ġflu":6562,"Ġconfident":6563,"ĠOper":6564,"Ġempty":6565,"Ġphen":6566,"Ġsector":6567,"Ġexcited":6568,"Ġremote":6569,"aph":6570,"oen":6571,"Ġdestroyed":6572,"Ġmoral":6573,"ĠHP":6574,"ĠRon":6575,"Ġdress":6576,"ĠBat":6577,"Ġlit":6578,"ĠMS":6579,"Ġaf":6580,"HL":6581,"rum":6582,"isms":6583,"Ġshouldn":6584,"Ġsympt":6585,"ĠToronto":6586,"hetic":6587,"Ġcarbon":6588,"Ġinstalled":6589,"Ġviolent":6590,"Ġsolar":6591,"ja":6592,"Ġpractices":6593,"Ġride":6594,"ĠPenn":6595,"Ġimproved":6596,"Ġaudio":6597,"Ġbehavi":6598,"ĠPS":6599,"Ġeating":6600,"Data":6601,"ĠReview":6602,"pass":6603,"claim":6604,"uated":6605,"angers":6606,"chen":6607,"Ġproperties":6608,"Ġanywhere":6609,"Another":6610,"Ġblow":6611,"ĠJackson":6612,"Ġproud":6613,"Ġplane":6614,"lines":6615,"Ġsquare":6616,"Ġ
proof":6617,"ansas":6618,"Ġtalked":6619,"makers":6620,"Ġsister":6621,"Ġholds":6622,"Ġresident":6623,"Ġ==":6624,"Ġresistance":6625,"Ġsplit":6626,"Ġprosecut":6627,"Ġconfidence":6628,"resents":6629,"Ġcuts":6630,"Ġexception":6631,"Ġzero":6632,"Getty":6633,"Ġcopyright":6634,"Ġtotally":6635,"ormal":6636,"ifications":6637,"ĠAustralian":6638,"Ġsick":6639,"Ġ150":6640,"Ġhousehold":6641,"Ġfees":6642,"Ġdrivers":6643,"ogen":6644,"ĠNY":6645,"Ġnecessarily":6646,"Ġregulations":6647,"earing":6648,"sl":6649,"Ġperspective":6650,"care":6651,"icial":6652,"His":6653,"Ġescape":6654,"Ġsurprised":6655,"ĠVan":6656,"urrent":6657,"Ġvac":6658,"81":6659,"ĠThus":6660,"Ġemphas":6661,"ĠChampions":6662,"ĠIce":6663,"Ġnarr":6664,"Ġheads":6665,"Ġcausing":6666,"bel":6667,"fortunately":6668,"ĠMa":6669,"Ġtargets":6670,"cipl":6671,"Ġafternoon":6672,"Ġadds":6673,"ĠMaybe":6674,"ĠFour":6675,"essed":6676,"plete":6677,"Ġusual":6678,"cho":6679,"ingu":6680,"Ġwithd":6681,"ĠEnergy":6682,"ĠEconom":6683,"OO":6684,"Ġarticles":6685,"Ġinjured":6686,"Ġmanage":6687,"Ġexplains":6688,"Ġdiagn":6689,"Rec":6690,"atures":6691,"Ġlinked":6692,"Ġdiscussed":6693,"Ġexplo":6694,"Ġoccasion":6695,"athan":6696,"Ġopposite":6697,"Ġfaces":6698,"Ġdenied":6699,"ĠKnight":6700,"Ġnut":6701,"Ġapproximately":6702,"Ġdisappoint":6703,"onymous":6704,"ĠBest":6705,"ĠLo":6706,"ĠHy":6707,"ĠAff":6708,"Ġvoting":6709,"anwhile":6710,"ĠIII":6711,"Ġinstitutions":6712,"agram":6713,"ĠDaily":6714,"Ġdrag":6715,"Ġnearby":6716,"Ġguilty":6717,"Ġconver":6718,"Pre":6719,"ship":6720,"Ġreward":6721,"Ġphilosoph":6722,"ĠSS":6723,"ugh":6724,"Ġapps":6725,"friend":6726,"Ġupper":6727,"Ġadvert":6728,"Ġsnow":6729,"Ġfrust":6730,"Ġourselves":6731,"Fr":6732,"ĠDie":6733,"ampion":6734,"Ġdismiss":6735,"Ġcere":6736,"Ġsignal":6737,"from":6738,"Ġ).":6739,"Ġ52":6740,"Ġcrimes":6741,"itors":6742,"estival":6743,"useum":6744,"Ġcouncil":6745,"ĠSaud":6746,"May":6747,"ĠGun":6748,"ician":6749,"ether":6750,"Ġsufficient":6751,"ĠHen":6752,"sole":6753,"Ġhistorical":6754,"ĠFar":6755,"ĠTurn":6756,"Ġpin":6757,"Ġsucceed":6758,"mat":6759,"lymp":6760,"Ġtradition":6761,"ĠOk":6762,"Ġcro":6763,"Ġdescription":6764,"alle":6765,"Ġsky":6766,"Te":6767,"Ġwidely":6768,"Ġwave":6769,"Ġdefinition":6770,"ĠJews":6771,"Ġcycle":6772,"Ġrefere":6773,"Ġbrings":6774,"usal":6775,"Ġalive":6776,"Ġfrequently":6777,"Ġintention":6778,"ĠControl":6779,"lv":6780,"ystem":6781,"Ġprivacy":6782,"gent":6783,"rence":6784,"ĠQuest":6785,"ĠChristmas":6786,"Ġrail":6787,"Ġcooper":6788,"Ġtested":6789,"ĠCapt":6790,"asks":6791,"Ġcomfortable":6792,"Ġdelivered":6793,"scape":6794,"Ġdepth":6795,"ĠGOP":6796,"Ġwrites":6797,"Ġassets":6798,"Ġsav":6799,"iments":6800,"Ġtransition":6801,"Ġartist":6802,"ĠLook":6803,"Ġlob":6804,"Ġcomponents":6805,"arity":6806,"Ġwalked":6807,"Ġroot":6808,"Ġparticipants":6809,"Ġnoticed":6810,"Ġresc":6811,"Ġnav":6812,"ĠAdminist":6813,"da":6814,"utral":6815,"plate":6816,"Ġimportance":6817,"Ġassert":6818,"iously":6819,"cription":6820,"Ġinjuries":6821,"ĠCheck":6822,"Ġregistered":6823,"Ġintent":6824,"Ġmissed":6825,"ographic":6826,"Ġsentence":6827,"ounter":6828,"Ġassistance":6829,"evin":6830,"Ġdatabase":6831,"Ġbuildings":6832,"Ġclassic":6833,"Ġthinks":6834,"ĠOhio":6835,"Pr":6836,"ugg":6837,"Ġfee":6838,"pan":6839,"Ġeffectively":6840,"Ġfacility":6841,"Ġbear":6842,"Ġchapter":6843,"Ġdogs":6844,"ĠColumb":6845,"Ġlatter":6846,"itial":6847,"Ġadmitted":6848,"TV":6849,"ĠGeorg":6850,"Ġposts":6851,"\\\\":6852,"Ġlawyer":6853,"Ġequival":6854,"Ġmand":6855,"Ġcontrolled":6856,"ĠWalk":6857,"ĠAndrew":6858,"Ġmenu":6859,"amental":6860,"Ġprotected":6861,"va":6862,"Ġadministr":6863,
"oral":6864,"Ġrein":6865,"ĠSar":6866,"Ġamounts":6867,"Ġnative":6868,"ĠMoon":6869,"Ġrepresents":6870,"Ġabandon":6871,"Ġcarrying":6872,"Ġtank":6873,"mary":6874,"Ġdeclared":6875,"Tube":6876,"Ġhat":6877,"Ġpunish":6878,"ellect":6879,"mes":6880,"Ġuniverse":6881,"ĠRod":6882,"phy":6883,"Ġinfrastructure":6884,"Ġ51":6885,"Ġopposed":6886,"ownt":6887,"ca":6888,"ĠMake":6889,"Ġhardware":6890,"Ġcoffee":6891,"Rel":6892,"bal":6893,"world":6894,"ĠSaf":6895,"ĠSea":6896,"inals":6897,"Ġowned":6898,"Ġhall":6899,"ersion":6900,"Ġdescribe":6901,"ĠPot":6902,"Ġportion":6903,"Ġatmosp":6904,"Ġgovernments":6905,"Ġdepending":6906,"Ġoffense":6907,"Ġtrick":6908,"awa":6909,"ĠLine":6910,"ĠVis":6911,"ĠHard":6912,"ĠOrig":6913,"ĠClick":6914,"Ġdesk":6915,"ĠValley":6916,"ĠSov":6917,"Ġmovies":6918,"Ġremark":6919,"Ġmail":6920,"Ġconscious":6921,"Ġruling":6922,"ĠRights":6923,"Ġmedic":6924,"hent":6925,"ĠWomen":6926,"><":6927,"Ġreplaced":6928,"ĠPrem":6929,"ĠThanks":6930,"Ġrenew":6931,"ĠBall":6932,"iform":6933,"Ġshots":6934,"Comm":6935,"Ġarmed":6936,"Ġconstant":6937,"Ġtaste":6938,"Ġrealized":6939,"Ġbuff":6940,"Ġmo":6941,"Ġefficient":6942,"Most":6943,"oration":6944,"ifies":6945,"Ġcommunication":6946,"Ġflood":6947,"Ġconsequences":6948,"Ġanyway":6949,"igg":6950,"ĠGM":6951,"ĠThank":6952,"Ġiron":6953,"Ġevolution":6954,"ĠCop":6955,"twitter":6956,"Ġ95":6957,"Ġrelationships":6958,"adel":6959,"ĠYoung":6960,"Ġproposal":6961,"ayers":6962,"uilding":6963,"ĠHot":6964,"ORE":6965,"cos":6966,"Ġcollabor":6967,"PG":6968,"axy":6969,"Ġknowing":6970,"Ġsupports":6971,"owed":6972,"Ġcontrols":6973,"Ġmerely":6974,"umer":6975,"Ġathlet":6976,"Ġfashion":6977,"path":6978,"Ġgift":6979,"Ġera":6980,"AND":6981,"Ġkinds":6982,"ĠKorean":6983,"Ġlegit":6984,"ulous":6985,"Ġessentially":6986,"Ġtherap":6987,"nic":6988,"Ġsuffered":6989,"Ġhur":6990,"Ġpromise":6991,"Ġexcess":6992,"Ġoverw":6993,"Ġprime":6994,"ĠHouston":6995,"erry":6996,"ĠMs":6997,"RS":6998,"2012":6999,"Ġstores":7000,"ĠOlymp":7001,"Ġjourney":7002,"Although":7003,"Sub":7004,"ĠEduc":7005,"ĠChapter":7006,"Ġrequests":7007,"Ġconsumers":7008,"Ġtiny":7009,"Ġisol":7010,"ĠFair":7011,"ba":7012,"ĠYOU":7013,"Ġcrash":7014,"celer":7015,"Ġemotional":7016,"Ġgoods":7017,"Ġelected":7018,"Ġmoder":7019,"ĠLinux":7020,"Ġblocks":7021,"Ġisland":7022,"ĠSociety":7023,"Ġelections":7024,"Ġbroadcast":7025,"Ġcheap":7026,"Ġnations":7027,"Ġseasons":7028,"400":7029,"Ġwaste":7030,"ĠSat":7031,"Ġfields":7032,"employ":7033,"Ġprofile":7034,"Ġauthors":7035,"ALL":7036,"ĠGra":7037,"west":7038,"ĠTy":7039,"Ġdeaths":7040,"Ġvacc":7041,"Ġformed":7042,"Ġdu":7043,"Ġongoing":7044,"ĠMuslims":7045,"elf":7046,"igure":7047,"Ġassume":7048,"ĠUkraine":7049,"water":7050,"Ġcoast":7051,"Ġvoted":7052,"gor":7053,"ĠAS":7054,"ĠMichigan":7055,"aza":7056,"ĠArm":7057,"iro":7058,"Ġflex":7059,"asters":7060,"''":7061,"Ġwelcome":7062,"arl":7063,"Ġlocations":7064,"igation":7065,"ĠFil":7066,"Ġbuying":7067,"Ġarchitect":7068,"Ġharder":7069,"ĠCub":7070,"Ġinterface":7071,"Ġrestaurant":7072,"Ġdiscover":7073,"Ġexceed":7074,"Ġfavour":7075,"gery":7076,"Ġduty":7077,"Ġpitch":7078,"ador":7079,"ĠMach":7080,"boy":7081,"Ġresponded":7082,"Ġextended":7083,"hers":7084,"Many":7085,"raid":7086,"ifer":7087,"ĠIns":7088,"Ser":7089,"Ġmedium":7090,"she":7091,"ĠSports":7092,"Ġmagazine":7093,"utation":7094,"Ġlimits":7095,"ĠGall":7096,"Ġexternal":7097,"razil":7098,"Ġyounger":7099,"tle":7100,"Ġremind":7101,"ĠCON":7102,"Ġimmediate":7103,"Ġhidden":7104,"Ġvolunte":7105,"Ġsimpl":7106,"odcast":7107,"Ġphase":7108,"dr":7109,"Ġplot":7110,"Ġexposure":7111,"RI":7112,"ograp":7113,"vin":7114,"anish":7115,"ĠAcad":7116,"ĠEngine
":7117,"Ġexpansion":7118,"ĠPay":7119,"Your":7120,"Ġpushed":7121,"ĠEll":7122,"ĠHead":7123,"Ġmarketing":7124,"ĠAC":7125,"ket":7126,"Ġhits":7127,"Ġgro":7128,"ĠAge":7129,"ĠScot":7130,"][":7131,"Ġstim":7132,"ĠiPhone":7133,"ĪĴ":7134,"Ġnarrow":7135,"ĠGetty":7136,"ĠTurkey":7137,"Ġperfectly":7138,"Ġenable":7139,"utch":7140,"Ġprecise":7141,"Ġregime":7142,"Ġshif":7143,"Ġcompens":7144,"gun":7145,"div":7146,"Ġchosen":7147,"ĠKen":7148,"Any":7149,"Ġtrees":7150,"Ġrecommended":7151,"ĠRen":7152,"uable":7153,"ĠHT":7154,"Follow":7155,"EG":7156,"ĠHand":7157,"ĠKenn":7158,"Ġarguments":7159,"Ġexists":7160,"Ġbike":7161,"ĠConserv":7162,"Ġbreaking":7163,"ĠGar":7164,"Ġcrazy":7165,"Ġvirtual":7166,"aylor":7167,"ixel":7168,"Ġ1980":7169,"Ġpermission":7170,"ĠSeries":7171,"Ġconsumer":7172,"Ġclosely":7173,"called":7174,"Ġ54":7175,"Ġhopes":7176,"Ġarray":7177,"ĠWin":7178,"ĠLabour":7179,"Ġspons":7180,"ĠIre":7181,"Ġpow":7182,"Ġreaders":7183,"Ġemployment":7184,"Ġcreature":7185,"Ġresulting":7186,"Ġaccurate":7187,"Ġmoments":7188,"Ġargued":7189,"Ġped":7190,"During":7191,"Ġ53":7192,"ĠTal":7193,"Ġsought":7194,"Ġsuffering":7195,"Ġicon":7196,"lee":7197,"Ġ($":7198,"alian":7199,"°":7200,"Ġpra":7201,"Ġbonus":7202,"(\"":7203,"ko":7204,"Ġacting":7205,"DE":7206,"fall":7207,"Ġcomparison":7208,"Ġsmooth":7209,"ĠNAS":7210,"upp":7211,"ĠJoseph":7212,"eping":7213,"ĠTake":7214,"ĠMid":7215,"Ġsending":7216,"fast":7217,"ĠFall":7218,"Ġdealing":7219,"user":7220,"ĠOrgan":7221,"Co":7222,"Ġattached":7223,"Ġsees":7224,"%.":7225,"Ġtypical":7226,"ART":7227,"Ġfinds":7228,"ĠAsia":7229,"umin":7230,"ĠCore":7231,"ĠEnt":7232,"inent":7233,"uce":7234,"ĠBlood":7235,"ĠNever":7236,"Ġemails":7237,"Ġhighlight":7238,"Ġconfront":7239,"atus":7240,"uted":7241,"Ġunus":7242,"Ġtopic":7243,"ĠAdam":7244,"Ġble":7245,"ati":7246,"Ġunderstood":7247,"Set":7248,"struct":7249,"TP":7250,"Ġmob":7251,"aa":7252,"ĠStart":7253,"pected":7254,"sell":7255,"Ġdedicated":7256,"ĠCA":7257,"uan":7258,"Ġsongs":7259,"escription":7260,"Ġtech":7261,"Ġrape":7262,"Ġaside":7263,"Ġgrant":7264,"Ġ56":7265,"sub":7266,"Ġargue":7267,"Ġcontaining":7268,"Ġschedule":7269,"Ġliberal":7270,"Ġpublicly":7271,"Ġheavily":7272,"ĠUt":7273,"iner":7274,"ĠSection":7275,"ĠCare":7276,"weet":7277,"ls":7278,"Dis":7279,"âĶĢ":7280,"ĠFollow":7281,"Back":7282,"ĠIT":7283,"Ġbes":7284,"ji":7285,"ĠHit":7286,"ested":7287,"Ġeverybody":7288,"ĠSwed":7289,"Ġfemin":7290,"Ġfacilities":7291,"Ġconven":7292,"Comp":7293,"ĠOS":7294,"core":7295,"Ġanx":7296,"Ġdivision":7297,"ĠCam":7298,"ĠStan":7299,"mates":7300,"Ġexplore":7301,"plom":7302,"Ġshares":7303,"pload":7304,"anes":7305,"Ġideal":7306,"eters":7307,"ĠBase":7308,"Ġplastic":7309,"Ġdistinct":7310,"ĠNetwork":7311,"ĠSeattle":7312,"Ġtrading":7313,"ensus":7314,"intend":7315,"Ġexhib":7316,"Ġinitially":7317,"ĠFood":7318,"Ġthousand":7319,"ĠBusiness":7320,"acter":7321,"Ġparagraph":7322,"Ġroughly":7323,"Ġwww":7324,"Ġcreative":7325,"ĠConf":7326,"Ġconsumption":7327,"Ġfilms":7328,"agan":7329,"Ġobtain":7330,"Ġtall":7331,"Ġtor":7332,"Ġacknowled":7333,"Ġgrown":7334,"alo":7335,"KE":7336,"Ġ400":7337,"enders":7338,"taining":7339,"UG":7340,"Ġsuicide":7341,"Ġwatched":7342,"ĠList":7343,"ali":7344,"rehens":7345,"Ġsurrounding":7346,"Ġpip":7347,"Ġflying":7348,"ĠJava":7349,"ordan":7350,"Ġserving":7351,"inations":7352,"post":7353,"Ġsho":7354,"Av":7355,"Ġjail":7356,"zy":7357,"Ġ1999":7358,"Ġ>":9609,"orous":9610,"Ġfirms":9611,"screen":9612,"una":9613,"Ġembarrass":9614,"ulse":9615,"Ġletting":9616,"Ġthrew":9617,"iley":9618,"Ġchannels":9619,"lan":9620,"ĠVegas":9621,"Ġsear":9622,"Ġfantastic":9623,"arre":9624,"uzzle":9625,"ĠDer":9626,"Th
ose":9627,"Ġswing":9628,"Ġsheet":9629,"index":9630,"cover":9631,"ogan":9632,"Ġvariables":9633,"ĠTech":9634,"Ġspoken":9635,"achel":9636,"ĠDa":9637,"ĠMountain":9638,"Ġloaded":9639,"Ġfootage":9640,"version":9641,"Ġunl":9642,"ĠPhoenix":9643,"Ġthrowing":9644,"Ġfiring":9645,"Ġtracking":9646,"Ġwidth":9647,"Ġstruggling":9648,"rooms":9649,"otion":9650,"Ġmonthly":9651,"ĠServer":9652,"Ġeggs":9653,"open":9654,"MC":9655,"Ġ1993":9656,"Ġhired":9657,"Ġstayed":9658,"ĠAllen":9659,"Ġstro":9660,"Ġ98":9661,"step":9662,"ĠTurkish":9663,"Ġfabric":9664,"isting":9665,"ĠDom":9666,"Ġdates":9667,"Ġpron":9668,"Ġbasketball":9669,"Ġlucky":9670,"ĠArabia":9671,"Ġassumed":9672,"esty":9673,"Ġaffairs":9674,"Ġglad":9675,"ĠIndeed":9676,"ĠFA":9677,"ĠWord":9678,"Ġjoining":9679,"ifice":9680,"pread":9681,"irts":9682,"ĠSelect":9683,"Ġpopulations":9684,"aware":9685,"Ġnose":9686,"Ġcomplaints":9687,"start":9688,"Ġscoring":9689,"Thanks":9690,"Ġmining":9691,"Ġvisitors":9692,"SH":9693,"Ġdamaged":9694,"Ġcharacteristics":9695,"ĠPent":9696,"DC":9697,"Ġ83":9698,"ĠSix":9699,"rates":9700,"Ġflags":9701,"ĠBrew":9702,"dog":9703,"Mark":9704,"////":9705,"Ġexecution":9706,"Ġjoke":9707,"phones":9708,"Ġtestimony":9709,"Ġobst":9710,"QL":9711,"ĠCut":9712,"Ġstudied":9713,"ĠNintendo":9714,"icket":9715,"ĠNBC":9716,"Ġlad":9717,"ĠBra":9718,"ĠMoh":9719,"Ġkernel":9720,"Ġoverwhelming":9721,"Ġaged":9722,"Ġapplicable":9723,"ĠCond":9724,"Ġroads":9725,"ĠBlock":9726,"made":9727,"odge":9728,"Ġcommands":9729,"Ġoffices":9730,"veland":9731,"Ġtut":9732,"Ġreceiver":9733,"ĠFro":9734,"Ġshopping":9735,"ĠiP":9736,"ĠStre":9737,"ĠABC":9738,"Ġentertainment":9739,"ĠBow":9740,"orted":9741,"Mc":9742,"Ġreads":9743,"grad":9744,"ĠCollect":9745,"ĠâĪĴ":9746,"ĠCapital":9747,"ederation":9748,"Ġemployer":9749,"Ġinvolvement":9750,"Ġanxiety":9751,"alia":9752,"Ġroof":9753,"ĠAmong":9754,"ĠDemocrat":9755,"Ġstats":9756,"ĠVill":9757,"Ġconstitutional":9758,"Ġreferring":9759,"itty":9760,"Ġtackle":9761,"outube":9762,"Ġbacked":9763,"ĠHong":9764,"ĠBroad":9765,"Ġele":9766,"ĠOtt":9767,"Ġ1992":9768,"hour":9769,"achusetts":9770,"Cal":9771,"Ġdefeated":9772,"Ġ81":9773,"esp":9774,"Ġseemingly":9775,"was":9776,"ĠJenn":9777,"ĠKurd":9778,"Ġgene":9779,"Ġdiscount":9780,"Ret":9781,"ECT":9782,"();":9783,"Ġclubs":9784,"Ġsid":9785,"ĠMarsh":9786,"Check":9787,"Ġpp":9788,"ĠEag":9789,"idespread":9790,"Ġbeings":9791,"FT":9792,"Ġintroduction":9793,"ĠChange":9794,"ARD":9795,"Ġ110":9796,"adows":9797,"ierce":9798,"Ġmeal":9799,"author":9800,"ĠBang":9801,"lahoma":9802,"Ġranks":9803,"2011":9804,"????":9805,"max":9806,"Ġcollapse":9807,"Ġopens":9808,"Ġecho":9809,"Ġsoph":9810,"Ġracist":9811,"Ġenormous":9812,"Ġwaves":9813,"Ġtap":9814,"Ġcomprehensive":9815,".--":9816,"ĠRoy":9817,"Ġfarmers":9818,"Related":9819,"aired":9820,"rones":9821,"ĠCrim":9822,"Ġproportion":9823,"Ġdesigns":9824,"Ġnegotiations":9825,"Ġvirtually":9826,"ĠBatman":9827,"Ġwarn":9828,"Ġlegitimate":9829,"mate":9830,"Ġconvention":9831,",,":9832,"netic":9833,"ĠSD":9834,"Ġconsistently":9835,"Ġcompensation":9836,"Ġpunishment":9837,"Ġye":9838,"Ġtie":9839,"ĠBureau":9840,"irlf":9841,"ĠBu":9842,"ĠAren":9843,"ĠPhilipp":9844,"Ġknife":9845,"Ġmemories":9846,"ĠRoss":9847,"Ġangle":9848,"Ġ86":9849,"ĠThunder":9850,"Ġrend":9851,"ĠTour":9852,"Ġcounts":9853,"sung":9854,"ĠImp":9855,"Ġeducational":9856,"Ġaccessible":9857,"COM":9858,"Ġdrew":9859,"yer":9860,"Gl":9861,"amine":9862,"ORT":9863,"OB":9864,"IB":9865,"master":9866,"Ġtrials":9867,"ogy":9868,"har":9869,"ĠTrust":9870,"Ġpreferred":9871,"irlfriend":9872,"ĠNev":9873,"Ġbin":9874,"Ġcow":9875,"Page":9876,"Ġsignature":9877,"ĠBL":9878,"700":9879,"
Ġretired":9880,"Ġbytes":9881,"Ġneighb":9882,"ĠLegend":9883,"Ġdevast":9884,"Ġsuspected":9885,"isons":9886,"ĠPokémon":9887,"scale":9888,"Ġcapabilities":9889,"Ġrevel":9890,"Ġcheese":9891,"dy":9892,"igrant":9893,"Ġfailing":9894,"bits":9895,"ĠHeroes":9896,"ĠGhost":9897,"ĠScient":9898,"Ġappointed":9899,"uri":9900,"Ġinstitution":9901,"Ġexpanded":9902,"greg":9903,"Ġmonitoring":9904,"Ġpodcast":9905,"Ġcoalition":9906,"Ġ96":9907,"Jo":9908,"Ġstolen":9909,"ĠSab":9910,"Ġstops":9911,"Ġholiday":9912,"Ġintr":9913,"Car":9914,"Black":9915,"ĠLGBT":9916,"Ġwarming":9917,"ĠAnderson":9918,"Ġ89":9919,"Ġproducer":9920,"Med":9921,"Ġaccuracy":9922,"ĠMarvel":9923,"izabeth":9924,"ĠPatrick":9925,"mony":9926,"Ġmini":9927,"acles":9928,"Ġovert":9929,"they":9930,"Ġmembership":9931,"ĠVen":9932,"Ġexch":9933,"Ġremoval":9934,"ĠDave":9935,"TY":9936,"mad":9937,"ĠFind":9938,"Ġadequ":9939,"Ġec":9940,"Ġteeth":9941,"Ġemotion":9942,"Ġperm":9943,"Ġsolely":9944,"db":9945,"Ġextraord":9946,"IGHT":9947,"cal":9948,"Ġguidelines":9949,"Ġdying":9950,"Ġsuspended":9951,"ĠPremier":9952,"ĠAnthony":9953,"elve":9954,"Ġdad":9955,"ĠEth":9956,"ĠFootball":9957,"Ġabandoned":9958,"Ġ<<":9959,"Ġmarch":9960,"Ġhorror":9961,"âĢ¦\"":9962,"Ġchildhood":9963,"Ġcampaigns":9964,"Ġlunch":9965,"ĠAlbert":9966,"block":9967,"âĸĪâĸĪ":9968,"ounding":9969,"Ġbone":9970,"organ":9971,"aders":9972,"ĠFlash":9973,"ĠDrive":9974,"Ġtonight":9975,"Ġwars":9976,"ĠFL":9977,"Ġformation":9978,"const":9979,"News":9980,"Ġcompe":9981,"orious":9982,"ĠStaff":9983,"Ġdiscussions":9984,"ĠProtection":9985,"ĠJam":9986,"Ġcriteria":9987,"Ġinstallation":9988,"Ġaccomplish":9989,"izza":9990,"Ġpublisher":9991,"Ġrescue":9992,"ĠTry":9993,"ULL":9994,"ĠSom":9995,"ĠHop":9996,"oret":9997,"ths":9998,"ordon":9999,"Ġpocket":10000,"ĠInv":10001,"Download":10002,"ĠCrime":10003,"Ġbene":10004,"ĠGuide":10005,"ĠAssembly":10006,"Ġparameters":10007,"IE":10008,"ĠAlexander":10009,"Ġconcert":10010,"ĠSche":10011,"Ġshoes":10012,"Ġvisiting":10013,"Ġrecall":10014,"Ġbub":10015,"Ġrural":10016,"Ġconcrete":10017,"ĠRos":10018,"Next":10019,"Russ":10020,"Ġloans":10021,"ĠShield":10022,"Ġtrem":10023,"hemat":10024,"kg":10025,"ĠHarris":10026,"isition":10027,"ĠMove":10028,"ĠFC":10029,"Ġfate":10030,"ĠCho":10031,"Ġtired":10032,"Ġprincipal":10033,"hist":10034,"iences":10035,"athy":10036,"Ġsevent":10037,"Ġmood":10038,"Ġstrategic":10039,"Ġdiseases":10040,"Ġforum":10041,"Ġtempor":10042,"Ġheadquarters":10043,"Par":10044,"ige":10045,"flix":10046,"Ġguitar":10047,"Ġ94":10048,"Only":10049,"Ġreleases":10050,"roph":10051,"================================":10052,"Ġ600":10053,"ĠContinue":10054,"igate":10055,"ĠCrit":10056,"system":10057,"Ġdisabled":10058,"Ġunexpected":10059,"ithub":10060,"Ġunclear":10061,"ĠEst":10062,"Ġcontrad":10063,"Ġstrategies":10064,"ventures":10065,"Ġpassage":10066,"AME":10067,"Ġimproving":10068,"Ġreveals":10069,"Ġdecrease":10070,"ova":10071,"Ġannoy":10072,"ĠShort":10073,"ĠLibrary":10074,"Ġcyber":10075,"nell":10076,"ĠHur":10077,"ĠCB":10078,"Ġphotograp":10079,"UI":10080,"Ġsed":10081,"Ge":10082,"Ġ87":10083,"Ġdiverse":10084,"Ġencouraged":10085,"Ġconspiracy":10086,"Ġbirds":10087,"Ġoperator":10088,"Ġhandful":10089,"Ġclassified":10090,"?)":10091,"Ġdramatic":10092,"Ġinvestigators":10093,"ito":10094,"Ġwidespread":10095,"ĠRoom":10096,"----------------------------------------------------------------":10097,"Ġcollective":10098,"Ġjournalist":10099,"String":10100,"Ġtemperatures":10101,"ila":10102,"Ġguid":10103,"Ġinspect":10104,"Ġmissile":10105,"ĠMayor":10106,"Ġmanual":10107,"Ġsimultane":10108,"Ġratings":10109,"Ġsuck":10110,"Ġ97":10111,"Ġuniversal":
10112,"Ġpharm":10113,"Ġdisrupt":10114,"iano":10115,"AV":10116,"Ġft":10117,"Ġstatist":10118,"olds":10119,"ĠWalker":10120,"php":10121,"Ġundert":10122,"ĠLas":10123,"ishop":10124,"ntil":10125,"reshold":10126,"ĠWhether":10127,"Ms":10128,"Ġdeny":10129,"ĠCloud":10130,"Ġprovider":10131,"Ġsurviv":10132,"ĠUpdate":10133,"has":10134,"Ġmistakes":10135,"charge":10136,"pled":10137,"rity":10138,"Ġnode":10139,"ĠMassachusetts":10140,"ools":10141,"lication":10142,"Ġfails":10143,"emale":10144,"ori":10145,"backs":10146,"Ġshirt":10147,"Ġ''":10148,"ĠNAT":10149,"Ġwaters":10150,"elson":10151,"Ġease":10152,"Ġscar":10153,"Ġcontents":10154,"mind":10155,"Ġcontribution":10156,"Ġshr":10157,"Ġhanded":10158,"Ġstability":10159,"Ġtrave":10160,"Em":10161,"Ġmirror":10162,"123":10163,"Ġweigh":10164,"Ġfiction":10165,"ouver":10166,"istant":10167,"rition":10168,"ĠFed":10169,"Ġphysically":10170,"Ġstake":10171,"ĠArticle":10172,"ĠArc":10173,"ĠLewis":10174,"ĠMind":10175,"Ġdemonstrate":10176,"Ġprofits":10177,"vision":10178,"omic":10179,"olid":10180,"Ġbattles":10181,"Ġdrives":10182,"Ġeastern":10183,"ĠSony":10184,"!!!":10185,"aration":10186,"vard":10187,"ĠGL":10188,"portation":10189,"Ġ92":10190,"Ġlawmakers":10191,"Ġprotecting":10192,"ĠEPA":10193,"Ġyeah":10194,"Ġshame":10195,"olph":10196,"even":10197,"xit":10198,"Ġattach":10199,"Ġrepresenting":10200,"Ġobs":10201,"ĠUtah":10202,"iffs":10203,"ĠFreedom":10204,"ó":10205,"AK":10206,"Ġincidents":10207,"itage":10208,"Ġviewers":10209,"cd":10210,"Ġmouse":10211,"Ġclar":10212,"Ġaccordance":10213,"Ġbot":10214,"cor":10215,"ĠSummer":10216,"held":10217,"Ġinnocent":10218,"Ġinitiative":10219,"ols":10220,"________________________________":10221,"Ġspots":10222,"pace":10223,"Ġconventional":10224,"Ġcorporations":10225,"Ġblocked":10226,"HD":10227,"attered":10228,"Ġrefers":10229,"Ġbuck":10230,"ĠDigital":10231,"120":10232,"Ġtopics":10233,"TF":10234,"Äģ":10235,"brid":10236,"reement":10237,"Ġunderlying":10238,"ĠMember":10239,"Ġinvestigating":10240,"Ġpregnancy":10241,"Ġtouchdown":10242,"ĠBand":10243,"ĠCaller":10244,"Ġinstances":10245,"PP":10246,"wa":10247,"Good":10248,"Ġ1991":10249,"ĠCold":10250,"Ġfears":10251,"Ġremarks":10252,"ĨĴ":10253,"atal":10254,"Ġmit":10255,"Ġexperiments":10256,"ipt":10257,"Color":10258,"indu":10259,"Update":10260,"Ġ93":10261,"Ag":10262,"Ġå":10263,"ancouver":10264,"Both":10265,"Ġjudges":10266,"Object":10267,"Ġstere":10268,"umbn":10269,"Ġparticipation":10270,"ĠStars":10271,"ĠJere":10272,"Ġweekly":10273,"ĠBan":10274,"Ġconversations":10275,"ĠPitt":10276,"uz":10277,"ĠIndiana":10278,"ĠKick":10279,"Ġinfection":10280,"Ġheroes":10281,"Ġsettled":10282,"Ġstrip":10283,"Ġhal":10284,"Ġdump":10285,"ĠSci":10286,"Ġles":10287,"Ġreferences":10288,"ĠURL":10289,"ĠBridge":10290,"Ġwanting":10291,"Force":10292,"Ġexclus":10293,"Meanwhile":10294,"mn":10295,"Ġgentle":10296,"maker":10297,"senal":10298,"ĠGro":10299,"ouri":10300,"ĠRain":10301,"ĠAlliance":10302,"Ġlift":10303,"ela":10304,"SD":10305,"ĠCleveland":10306,"Ġranked":10307,"Ġstadium":10308,"Ġdeadly":10309,"ä¸":10310,"Ġriding":10311,"aria":10312,"ĠArmor":10313,"Ġdocumentation":10314,"ĠGreece":10315,"reek":10316,"Ġlens":10317,"ĠSa":10318,"Ġgross":10319,"ĠEmer":10320,"agers":10321,"ĠDub":10322,"ĠRh":10323,"ĠAMD":10324,"Ġarrival":10325,"Ġdesert":10326,"Ġsupplement":10327,"ĠResp":10328,"Ġknee":10329,"Ġmargin":10330,"font":10331,"ogg":10332,"2010":10333,"ĠPir":10334,"ĠProm":10335,"ivals":10336,"Ġintake":10337,"Ġdifferently":10338,"ugs":10339,"Ġbits":10340,"cluded":10341,"Ġsearching":10342,"ĠDu":10343,"umble":10344,"Ġfunctional":10345,"ĠBaltimore":10346,"ĠCould":10347,"Ġ
desired":10348,"Ġcircuit":10349,"ĠLyn":10350,"ĠGO":10351,"ĠFalse":10352,"repre":10353,"':":10354,"alties":10355,"Ġminim":10356,"Ġdrove":10357,"ĠShould":10358,"Ġhip":10359,"Ġpros":10360,"Ġutility":10361,"ĠNature":10362,"ĠMode":10363,"President":10364,"opp":10365,"rat":10366,"formance":10367,"Ġconcentration":10368,"Ġfont":10369,"ĠBud":10370,"Ġamid":10371,"Ġrevers":10372,"ĠML":10373,"Bar":10374,"Ġinteraction":10375,"Ġjurisd":10376,"Ġspells":10377,"dep":10378,"fil":10379,"Ġcivilians":10380,"utter":10381,"ĠCooper":10382,"ĠBelow":10383,"Ġentrance":10384,"Ġconvert":10385,"Ġcontroversy":10386,"owered":10387,"Ġcontrary":10388,"Ġarc":10389,"ĠExecutive":10390,"ĠOfficer":10391,"Ġpackages":10392,"Ġprogressive":10393,"width":10394,"Ġreserved":10395,"vol":10396,"ĠSamsung":10397,"Ġprinted":10398,"Ġcenters":10399,"Ġintroduce":10400,"ĠKennedy":10401,"Ġodds":10402,"Ġsurely":10403,"Ġindependence":10404,"Ġpassengers":10405,"reprene":10406,"ĠBeh":10407,"Ġloves":10408,"ĠESPN":10409,"Ġfacilit":10410,"Ġidentical":10411,"Ġdoct":10412,"Ġpartnership":10413,"conf":10414,"ĠHide":10415,"Ġconfused":10416,"ĠCow":10417,"Men":10418,"Ġwrest":10419,"ĠIraqi":10420,"Ġholes":10421,"ĠStudies":10422,"Ġpregnant":10423,"hard":10424,"Ġsignals":10425,"IX":10426,"Ġpulling":10427,"Ġgraduate":10428,"Ġnominee":10429,"Date":10430,"Ġpermitted":10431,"ĠâĤ¬":10432,"ĠOklahoma":10433,"Start":10434,"Ġauthorized":10435,"Ġalarm":10436,"ĠCos":10437,"van":10438,"Ġgenerations":10439,"cular":10440,"Ġdragon":10441,"ĠSoftware":10442,"ĠEdward":10443,"Ġcontroller":10444,"Sen":10445,"gered":10446,"ĠVik":10447,"Ġapproached":10448,"Thank":10449,"Ġcance":10450,"Ġformula":10451,"ĠSmall":10452,"Ġweakness":10453,"Ġramp":10454,"itudes":10455,"jud":10456,"Ġbrilliant":10457,"Ġaccus":10458,"source":10459,"Ġ800":10460,"ĠEvil":10461,"Sw":10462,"Ġhomeless":10463,"week":10464,"iens":10465,"rics":10466,"ĠThird":10467,"TO":10468,"Ġorganic":10469,"Ġpresentation":10470,"agh":10471,"ĠDownload":10472,"vation":10473,"Ġassembly":10474,"orable":10475,"holders":10476,"ĠBernie":10477,"ĠHelp":10478,"Ġtong":10479,"ĠFight":10480,"Ġbeach":10481,"Book":10482,"ĠLic":10483,"Ġrush":10484,"ĠRound":10485,"oup":10486,"ĠMarx":10487,"Ġcalculated":10488,"ĠDevil":10489,"ĠSarah":10490,"Ġoccasionally":10491,"Ġbullet":10492,"Available":10493,"gate":10494,"Ġ91":10495,"Ġhosp":10496,"Ġpromises":10497,"ĠHIV":10498,"ĠStadium":10499,"ĠStock":10500,"ĠCorporation":10501,"gage":10502,"NG":10503,"ĠCredit":10504,"Ġsne":10505,"ibl":10506,"Ġaccum":10507,"such":10508,"Ġterrorists":10509,"Ġconsciousness":10510,"ĠZh":10511,"Ġdrama":10512,"oola":10513,"piration":10514,"Ġlabour":10515,"ĠNin":10516,"Ġutter":10517,"Ġdemocratic":10518,"Ġassass":10519,"ilation":10520,"Ġgest":10521,"Ġabroad":10522,"Ġmetab":10523,"Ġsorts":10524,"Ġflav":10525,"UB":10526,"Ġmg":10527,"ĠNothing":10528,"ĠOd":10529,"Ġmusical":10530,"2009":10531,"Ġdrops":10532,"ocated":10533,"ateral":10534,"000000":10535,"Ġgre":10536,"Ġequality":10537,"Ġburden":10538,"Ġvig":10539,"ĠLeader":10540,"------------":10541,"Ġceremony":10542,"Ġfighter":10543,"Ġactors":10544,"Ġæ":10545,"aman":10546,"Fi":10547,"Ġalign":10548,"puter":10549,"Ġelder":10550,"ĠNSA":10551,"Ġrepresentation":10552,"ĠOntario":10553,"ITH":10554,"usalem":10555,"Ġharassment":10556,"itzer":10557,"Ġsymp":10558,"Ġboxes":10559,"ĠDR":10560,"Ġmanifest":10561,"atre":10562,"Ġ^":10563,"Ġdies":10564,"leton":10565,"Ġmissions":10566,"ethe":10567,"Ġresolve":10568,"Ġfollowers":10569,"Ġasc":10570,"Ġkm":10571,"lord":10572,"ammed":10573,"Ġsilent":10574,"ĠAssociated":10575,"Ġtiming":10576,"Ġprisoners":10577,"ĠKings":10
578,"ĠFive":10579,"Ġtower":10580,"Ġapproaches":10581,"Ġprecisely":10582,"Ġbureau":10583,"ĠMother":10584,"ĠIss":10585,"Ġkeyboard":10586,"itual":10587,"Ġfunded":10588,"Ġstaying":10589,"Ġpsychological":10590,"Ġmile":10591,"ĠLeon":10592,"ĠBarb":10593,"will":10594,"Ġwider":10595,"ĠAtlantic":10596,"Ġtill":10597,"ĠRome":10598,"rot":10599,"Ġaccompan":10600,"Ġflour":10601,"aco":10602,"World":10603,"ĠExpress":10604,"ĠYu":10605,"Cor":10606,"Ġpleased":10607,"party":10608,"Ġpointing":10609,"Ġinflation":10610,"Ġroy":10611,"Ġ),":10612,"ainer":10613,"Ġwedding":10614,"ormon":10615,"Ġrequiring":10616,"Ġqualified":10617,"Ġsegment":10618,"END":10619,"Ġsizes":10620,"eals":10621,"Ġcorrupt":10622,"assador":10623,"Ġceleb":10624,"Ġdreams":10625,"ĠMess":10626,"Ġchecking":10627,"ĠVersion":10628,"Ġpreparing":10629,"Ġactively":10630,"ĠDiff":10631,"Ġlux":10632,"ĠWinter":10633,"acteria":10634,"ĠNE":10635,"Ġdeputy":10636,"Ġtransgender":10637,"Ġsummary":10638,"Ġinher":10639,"eries":10640,"char":10641,"ĠYan":10642,"Ġknock":10643,"ĠPath":10644,"Ġlip":10645,"roller":10646,"Ġimpression":10647,"Ġcelebrate":10648,"Ġslide":10649,"Ġguests":10650,"Ġclip":10651,"FS":10652,"Ġsavings":10653,"Ġcaptain":10654,"Ġlegacy":10655,"ĠDenver":10656,"Ġwounded":10657,"taboola":10658,"ACT":10659,"Ġpursue":10660,"Ġoxy":10661,"Ġq":10662,"Ġsemi":10663,"ĠNeed":10664,"ĠAffairs":10665,"Ġobsc":10666,"Ġchecked":10667,"Ġdual":10668,"Code":10669,"ĠMD":10670,"lem":10671,"ulty":10672,"Ġ©":10673,"ĠElizabeth":10674,"Ġcenturies":10675,"arded":10676,"src":10677,"Ġevident":10678,"ennis":10679,"atin":10680,"Ġunemployment":10681,"ĠMario":10682,"Ġintim":10683,"Christ":10684,"Ġbiological":10685,"Ġsoldier":10686,"ĠAdded":10687,"Ġmath":10688,"ĠGil":10689,"Ġbias":10690,"Ġdating":10691,"ĠOcean":10692,"Ġmice":10693,"Mus":10694,"hire":10695,"ĠTes":10696,"Server":10697,"limited":10698,"Size":10699,"Ġmeters":10700,"Ġrocket":10701,"essee":10702,"Ġcertificate":10703,"ĠIranian":10704,"ASS":10705,"Ġgrid":10706,"Dec":10707,"Ġrolling":10708,"commun":10709,"ĠSweden":10710,"bury":10711,"Ġtissue":10712,"Ġracism":10713,"ĠLocal":10714,"Ġmystery":10715,"Ġexamine":10716,"Ġstem":10717,"Ġsits":10718,"Ġhoped":10719,"oting":10720,"Ġdialogue":10721,"Ġpersu":10722,"Watch":10723,"lay":10724,"MAN":10725,"Ġchronic":10726,"ĠPortland":10727,"market":10728,"ĠSEC":10729,"Ġparallel":10730,"Ġscandal":10731,"Ġcarries":10732,"Ġphenomenon":10733,"human":10734,"acker":10735,"ĠOx":10736,"Ġretirement":10737,"tainment":10738,"ovie":10739,"ĠGear":10740,"Ġduties":10741,"Ġdose":10742,"Ġscroll":10743,"MB":10744,"inf":10745,"Ġsauce":10746,"Ġlandscape":10747,"reddit":10748,"ĠChampionship":10749,"ĠReddit":10750,"alid":10751,"Ġcoin":10752,"Ġovers":10753,"Ġposting":10754,"about":10755,"Ġfel":10756,"andy":10757,"Ġbold":10758,"Ġfocusing":10759,"effect":10760,"GR":10761,"Ġdeemed":10762,"Ġrecommendations":10763,"Ġstepped":10764,"Ġvoter":10765,"ĠDeep":10766,"ĠInstagram":10767,"Ġmoderate":10768,"ĠMaryland":10769,"Ġrestricted":10770,"ĠMB":10771,"ĠChall":10772,"Ġtob":10773,"Ġcir":10774,"ĠOcc":10775,"ĠEver":10776,"Ġcollaps":10777,"INFO":10778,"=-":10779,"ĠPict":10780,"ĠAccount":10781,"nc":10782,"Ġought":10783,"Ġexport":10784,"Ġdrunk":10785,"('":10786,"Ġwise":10787,"ĠMort":10788,"necess":10789,"Ġancest":10790,"ĠIncre":10791,"Ġfrequent":10792,"mir":10793,"Ġinterpretation":10794,"Ġdependent":10795,"Ġcoins":10796,"ĠBol":10797,"Video":10798,"ĠJustin":10799,"Ġfatal":10800,"Ġcooking":10801,"Ġconfusion":10802,"ipher":10803,"Ġcustody":10804,"ĠMorgan":10805,"omach":10806,"ĠGovernor":10807,"Ġrestaurants":10808,"eling":10809,"Ġacknowledged
":10810,"Ġther":10811,"Ġgenes":10812,"ching":10813,"Hey":10814,"Ġtactics":10815,"ĠMexican":10816,"Ġvend":10817,"Ġhes":10818,"quer":10819,"Ġnoting":10820,"ĠCameron":10821,"Ġtargeting":10822,"rock":10823,"Ġcredits":10824,"Ġemotions":10825,"Ġrepresentatives":10826,"news":10827,"Ġlegislative":10828,"Ġremoving":10829,"Ġtweeted":10830,"ĠCarter":10831,"ĠFixed":10832,"Ġforcing":10833,"Ġspeaker":10834,"Ġmales":10835,"ĠVietnam":10836,"lined":10837,"Ġconcepts":10838,"Ġvoices":10839,"oir":10840,"ĠTrib":10841,"Whe":10842,"ĠJerusalem":10843,"ĠSant":10844,"Ġcul":10845,"Ġlady":10846,"ĠHawai":10847,"Ġarts":10848,"ĠInn":10849,"ĠMachine":10850,"ĠEmperor":10851,"Ġslot":10852,"gly":10853,"ĠProcess":10854,"III":10855,"Ġathletes":10856,"ĠTemple":10857,"ĠRepresent":10858,"Ġpresc":10859,"Ġtons":10860,"Ġgolden":10861,"Ġpunch":10862,"ĠGR":10863,"iverpool":10864,"Ġenact":10865,"Ġlobby":10866,"Ġmos":10867,"Ġpicking":10868,"Ġlifetime":10869,"Ġcognitive":10870,"Each":10871,"zo":10872,"Ġdub":10873,"Ġconsists":10874,"oln":10875,"Ġfestival":10876,"amous":10877,"Ġintellig":10878,"words":10879,"ĠSmart":10880,"Ġdele":10881,"Ġlapt":10882,"Ġmagical":10883,"ĠSin":10884,"bus":10885,"urities":10886,"ighth":10887,"ĠRuby":10888,"ĠSure":10889,"olving":10890,"Ġjun":10891,"OST":10892,"Ġimposed":10893,"Ġastron":10894,"Ġcorrel":10895,"ĠNS":10896,"ĠKit":10897,"ĠFuture":10898,"burn":10899,"Ġimmune":10900,"ocus":10901,"Ġcourses":10902,"ĠString":10903,"Ġlean":10904,"Ġghost":10905,"Ġoutcomes":10906,"Ġexpense":10907,"Ġeveryday":10908,"Ġacceptable":10909,"Ah":10910,"Ġequipped":10911,"Ġorange":10912,"FR":10913,"ĠDutch":10914,"Though":10915,"ĠRank":10916,"QU":10917,"ĠRoberts":10918,"what":10919,"rend":10920,"Ġdisappear":10921,"Ġspawn":10922,"ĠLam":10923,"ois":10924,"Ġdeserve":10925,"Ġminimal":10926,"Ġnervous":10927,"ĠWould":10928,"Ġrook":10929,"ĠVancouver":10930,"Ġresign":10931,"shire":10932,"ĠWorks":10933,"ĠBuild":10934,"Ġaffordable":10935,"ĠGary":10936,"ĠArena":10937,"Ġhanging":10938,"Ġimplications":10939,"ĠSong":10940,"Ġmaintaining":10941,"Ġguards":10942,"CON":10943,"Ġderived":10944,"Ġexecuted":10945,"Ġtheories":10946,"Ġquoted":10947,"ĠAndre":10948,"oga":10949,"seless":10950,"info":10951,"ĠBelg":10952,"Ġtears":10953,"ĠSurv":10954,"Ġbirthday":10955,"igious":10956,"immer":10957,"Ġspectrum":10958,"Ġarchitecture":10959,"Ġrecruit":10960,"arma":10961,"Table":10962,"Ġmonsters":10963,"ĠGov":10964,"Ġdestination":10965,"Ġattractive":10966,"Ġfoss":10967,"ĠMoreover":10968,"Ġpresents":10969,"THE":10970,"Ġreply":10971,"pton":10972,"Ġcum":10973,"Ġdelight":10974,"Ġaffects":10975,"Ġdonations":10976,"ĠToy":10977,"ĠHim":10978,"MENT":10979,"Ġovercome":10980,"itched":10981,"ĠFantasy":10982,"ĠHat":10983,"ĠBeast":10984,"bott":10985,"Ġinvestigations":10986,"Run":10987,"Ġhunting":10988,"di":10989,"fund":10990,"Ġsessions":10991,"estyle":10992,"Ġportray":10993,"oids":10994,"Yeah":10995,"Ġcommunicate":10996,"Ġcomedy":10997,"ĠYang":10998,"Ġbelt":10999,"ĠMarine":11000,"Ġpredicted":11001,"Play":11002,"Ġimportantly":11003,"Ġremarkable":11004,"Ġeliminate":11005,"David":11006,"Ġbind":11007,"VID":11008,"Ġadvocates":11009,"ĠGaza":11010,"imp":11011,"DB":11012,"ĠNa":11013,"ĠSimilar":11014,"IES":11015,"Ġcharity":11016,"vas":11017,"math":11018,"Ġâĸ":11019,"oker":11020,"ndum":11021,"Ġcaps":11022,"ĠHal":11023,"2000":11024,"ean":11025,"Ġfleet":11026,"Ġrecre":11027,"Right":11028,"Ġsleeping":11029,"ijing":11030,"kind":11031,"Ġdesignated":11032,"ä":11033,"Ġanimation":11034,"kee":11035,"ĠIntrodu":11036,"Ġ/>":11037,"Ġdelayed":11038,"Ġtremend":11039,"Ġcurious":11040,"Use":11041,"Ġlect":11042,
"dam":11043,"Ġinnovation":11044,"ĠPoints":11045,"Ġloading":11046,"Ġdispute":11047,"ctic":11048,"irds":11049,"ĠBY":11050,"Ġnurs":11051,"ĠValue":11052,"IONS":11053,"ĠHum":11054,"Ġtemplate":11055,"mers":11056,"Ġappearances":11057,"ĠEntertainment":11058,"Ġtranslation":11059,"Ġsake":11060,"Ġbeneath":11061,"Ġinhib":11062,"Ġeuro":11063,"abetes":11064,"Ġstudying":11065,"ĠMas":11066,"Ġperceived":11067,"Ġexamined":11068,"Ġeager":11069,"Ġcoaches":11070,"Ġimper":11071,"chi":11072,"Ġproduces":11073,"\").":11074,"ĠEveryone":11075,"Ġmunicip":11076,"Ġgirlfriend":11077,"Ġhire":11078,"ĠVice":11079,"Ġsuitable":11080,"opy":11081,"Ġinequ":11082,"ĠDuke":11083,"fish":11084,"first":11085,"ĠObs":11086,"Ġinterior":11087,"ĠBruce":11088,"ĠRy":11089,"Ġanalys":11090,"Ġconsiderable":11091,"Ġforecast":11092,"Ġfert":11093,"orship":11094,"ĠDrug":11095,"ĠALL":11096,":\"":11097,"thur":11098,"ĠMail":11099,"Ġballot":11100,"Ġinstantly":11101,"ĠChannel":11102,"Ġpicks":11103,"Ġ1989":11104,"Ġtent":11105,"oli":11106,"Ġcivilian":11107,"bling":11108,"ello":11109,"bu":11110,"Ġinch":11111,"Ġlogo":11112,"Ġcooperation":11113,"Ġwalks":11114,"Ġinvestments":11115,"Ġimprison":11116,"ĠFestival":11117,"ĠKy":11118,"Ġlegally":11119,"Ġgri":11120,"charg":11121,"Sl":11122,"Ġthreatening":11123,"duction":11124,"flow":11125,"Ġdismissed":11126,"ibraries":11127,"cap":11128,"ele":11129,"ĠMcG":11130,"ĠHarvard":11131,"ĠConservative":11132,"ĠCBS":11133,"png":11134,"Ġroots":11135,"ĠHaving":11136,"umbled":11137,"ĠFun":11138,"\\/":11139,"ĠSearch":11140,"plex":11141,"Ġdiscussing":11142,"Ġcontinu":11143,"ĠTai":11144,"ĠWik":11145,"Free":11146,"fit":11147,"Ġrefuse":11148,"Ġmanaging":11149,"Ġsynd":11150,"ipedia":11151,"walk":11152,"Ġprofessionals":11153,"Ġguidance":11154,"Ġuniversities":11155,"Ġassemb":11156,"untu":11157,"Finally":11158,"ASE":11159,"ĠAuto":11160,"ĠHad":11161,"Ġanniversary":11162,"LD":11163,"ĠDur":11164,"ĠUltimate":11165,"ihad":11166,"product":11167,"Ġtransit":11168,"Ġrestore":11169,"Ġexplaining":11170,"Ġasset":11171,"Ġtransferred":11172,"Ġburst":11173,"apolis":11174,"ĠMagazine":11175,"ĠCra":11176,"ĠBR":11177,"gged":11178,"ĠHE":11179,"Mich":11180,"bet":11181,"ĠLady":11182,"ylum":11183,"erves":11184,"Ġmeets":11185,"white":11186,"Log":11187,"Ġcorresponding":11188,"Ġinsisted":11189,"GG":11190,"Ġsurrounded":11191,"Ġtens":11192,"Ġlane":11193,"Ġcoinc":11194,"home":11195,"Ġexisted":11196,"ected":11197,"ĠDouble":11198,"lamm":11199,"Ġskept":11200,"exp":11201,"Ġperception":11202,"iev":11203,"ĠBeing":11204,"oft":11205,"Ġadopt":11206,".:":11207,"];":11208,"Windows":11209,"Ġsatellite":11210,"ASH":11211,"Ġinfant":11212,"description":11213,"ĠMeanwhile":11214,"cm":11215,"oca":11216,"ĠTreat":11217,"actor":11218,"Ġtobacco":11219,"ĠNorm":11220,"emption":11221,"Ġflesh":11222,"Ġje":11223,"oop":11224,"ĠHeaven":11225,"Ġbeating":11226,"anim":11227,"Ġgathering":11228,"Ġcultiv":11229,"GO":11230,"abe":11231,"ĠJonathan":11232,"ĠSafety":11233,"Ġbadly":11234,"prot":11235,"Ġchoosing":11236,"Ġcontacted":11237,"Ġquit":11238,"Ġdistur":11239,"Ġstir":11240,"Ġtoken":11241,"Det":11242,"ĠPa":11243,"Ġfunctionality":11244,"003":11245,"some":11246,"Ġlimitations":11247,"Ġmeth":11248,"build":11249,"config":11250,"NT":11251,"rell":11252,"blem":11253,"ĠMom":11254,"Ġveterans":11255,"ĠHu":11256,"Ġtrends":11257,"arer":11258,"ĠGiven":11259,"ĠCaption":11260,"may":11261,"AST":11262,"Ġwondering":11263,"ĠClark":11264,"normal":11265,"Ġseparated":11266,"Ġdesp":11267,"stic":11268,"brew":11269,"Ġrelating":11270,"ĠNik":11271,"ĠFarm":11272,"Ġenthusi":11273,"good":11274,"deb":11275,"Ġactivist":11276,"Ġmart":11
277,"Ġexplosion":11278,"ĠEconomic":11279,"Link":11280,"Ġinsight":11281,"Ġconvenient":11282,"Ġcounterpart":11283,"support":11284,"ĠVirt":11285,"agen":11286,"ĠTennessee":11287,"ĠSimon":11288,"ĠAward":11289,"OCK":11290,"ĠFigure":11291,"Ġoverseas":11292,"Ġpride":11293,"ĠCas":11294,"note":11295,"mg":11296,"Current":11297,"Ġdisplays":11298,"content":11299,"Ġtraveling":11300,"Ġhospitals":11301,"ĠFinancial":11302,"ĠPast":11303,"Ġdefendant":11304,"Ġstreaming":11305,"mble":11306,"ĠBerlin":11307,"uki":11308,"Ġdistribut":11309,"Ġantib":11310,"Ġchocolate":11311,"ĠCastle":11312,"Ġinterrupt":11313,"ĠRow":11314,"Ġconversion":11315,"Ġbugs":11316,"ĠRather":11317,"liest":11318,"LY":11319,"ĠJean":11320,"common":11321,"akh":11322,"Ġ130":11323,"otton":11324,"ĠDean":11325,"Ġamendment":11326,"Ġgameplay":11327,"ĠWarren":11328,"oda":11329,"Ġhighlights":11330,"Ġirre":11331,"ĠNATO":11332,"Ġballs":11333,"Ġdemanding":11334,"URE":11335,"ĠLuke":11336,"Figure":11337,"stop":11338,"onia":11339,"zone":11340,"izers":11341,"ĠWR":11342,"Ġawarded":11343,"Ġregulatory":11344,"ĠHart":11345,"ĠSN":11346,"pling":11347,"Ġsour":11348,"ĠPixel":11349,"usive":11350,"Ġfet":11351,"ĠSent":11352,"Ġautomatic":11353,"Ġfer":11354,"vernment":11355,"ĠKhan":11356,"TON":11357,"father":11358,"Ġextraordinary":11359,"throp":11360,"ĠPython":11361,"ĠGPU":11362,"Ġsexually":11363,"Ġdesktop":11364,"itivity":11365,"ĠAntonio":11366,"Ġorient":11367,"Ġears":11368,"obby":11369,"ouses":11370,"vertisements":11371,"Ġmanufacturers":11372,"icient":11373,"minute":11374,"Ġconviction":11375,"Ġgarden":11376,"public":11377,"Ġsatisfied":11378,"fold":11379,"OK":11380,"Ġinhab":11381,"ĠThink":11382,"Ġprogramme":11383,"Ġstomach":11384,"Ġcoordin":11385,"Ġholy":11386,"Ġthreshold":11387,"Ġrhet":11388,"Ġserial":11389,"Ġemployers":11390,"ĠEverything":11391,"rah":11392,"Ġbother":11393,"Ġbrands":11394,"Value":11395,"ĠTed":11396,"ĠPlanet":11397,"Ġpink":11398,"ĠFurthermore":11399,"sa":11400,"PE":11401,"reck":11402,"ĠUSD":11403,"otte":11404,"Ġ&&":11405,"Ġlanded":11406,"gets":11407,"Ġproducers":11408,"Ġhealthcare":11409,"Ġdominant":11410,"Ġdestro":11411,"Ġamended":11412,"chron":11413,"Ġfits":11414,"ĠSyd":11415,"ĠAuthority":11416,"ATCH":11417,"Ġfights":11418,"ĠLLC":11419,"Ġ---":11420,"ĠCorp":11421,"Ġtoxic":11422,"specific":11423,"ĠCorn":11424,"ĠChel":11425,"Ġtelephone":11426,"ĠPant":11427,"Ġmysterious":11428,"aunch":11429,"odox":11430,"media":11431,"Ġwitnesses":11432,"agu":11433,"Ġquestioned":11434,"ĠBrexit":11435,"ĠRemember":11436,"enez":11437,"Ġendorse":11438,"iatric":11439,"ĠIdent":11440,"Ġridiculous":11441,"110":11442,"Ġprayer":11443,"Ġscientist":11444,"Ġ1950":11445,"ĠAqu":11446,"Ġunderground":11447,"ĠUFC":11448,"mare":11449,"ĠLater":11450,"wich":11451,"Ġsubscrib":11452,"Ġhosts":11453,"Ġerr":11454,"Ġgrants":11455,"antom":11456,"Ġsummon":11457,"early":11458,"ĠClear":11459,"ĠPrim":11460,"Ġsuspension":11461,"Ġguaranteed":11462,"apper":11463,"Ġrice":11464,"ĠSean":11465,"ĠShin":11466,"Ġreferendum":11467,"Ġfled":11468,"rust":11469,"Ġ360":11470,"tery":11471,"Ġshocked":11472,"BR":11473,"ĠOil":11474,"ĠAllah":11475,"Ġpartly":11476,"Ġignor":11477,"Ġtransmission":11478,"Ġhomosexual":11479,"iversal":11480,"Ġhopefully":11481,"ãĤ¤":11482,"Ġlesson":11483,"Leg":11484,"Ġ..":11485,"Yet":11486,"table":11487,"appropri":11488,"rett":11489,"Ġboards":11490,"Ġincorrect":11491,"Ġbacteria":11492,"aru":11493,"amac":11494,"Ġsnap":11495,".'\"":11496,"Ġparad":11497,"tem":11498,"heart":11499,"Ġavailability":11500,"Ġwisdom":11501,"Ġ(+":11502,"Ġpriest":11503,"ĠÂłĠÂł":11504,"Open":11505,"Ġspan":11506,"Ġparameter":11507,"
Ġconvince":11508,"Ġ(%)":11509,"rac":11510,"Ġfo":11511,"Ġsafely":11512,"Ġconverted":11513,"ĠOlympic":11514,"Ġreserve":11515,"Ġhealing":11516,"ĠMine":11517,"Max":11518,"Ġinherent":11519,"ĠGraham":11520,"Ġintegrated":11521,"Dem":11522,"Ġpipeline":11523,"Ġapplying":11524,"Ġembed":11525,"ĠCharlie":11526,"Ġcave":11527,"2008":11528,"Ġconsensus":11529,"Ġrewards":11530,"Pal":11531,"ĠHTML":11532,"Ġpopularity":11533,"looking":11534,"ĠSword":11535,"ĠArts":11536,"')":11537,"Ġelectron":11538,"clusions":11539,"Ġintegrity":11540,"Ġexclusively":11541,"Ġgrace":11542,"Ġtorture":11543,"Ġburned":11544,"two":11545,"Ġ180":11546,"Produ":11547,"Ġentreprene":11548,"raphics":11549,"Ġgym":11550,"ricane":11551,"ĠTam":11552,"Ġadministrative":11553,"Ġmanufacturer":11554,"Ġvel":11555,"ĠNi":11556,"Ġisolated":11557,"ĠMedicine":11558,"Ġbackup":11559,"Ġpromoting":11560,"Ġcommander":11561,"Ġflee":11562,"ĠRussell":11563,"Ġforgotten":11564,"ĠMissouri":11565,"Ġresidence":11566,"mons":11567,"Ġresemb":11568,"Ġwand":11569,"Ġmeaningful":11570,"PT":11571,"Ġbol":11572,"Ġhelic":11573,"Ġwealthy":11574,"Ġrifle":11575,"strong":11576,"rowing":11577,"plan":11578,"asury":11579,"âĢ¦.":11580,"Ġexpanding":11581,"ĠHamilton":11582,"Ġreceives":11583,"SI":11584,"eatures":11585,"ĠAnim":11586,"REE":11587,"Put":11588,"Ġbriefly":11589,"rive":11590,"Ġstimul":11591,"Ġ``(":11592,"Ġ__":11593,"Ġchip":11594,"Ġhaz":11595,"Ġprize":11596,"ĠThings":11597,"ACE":11598,"ulin":11599,"dict":11600,"oku":11601,"Ġassociate":11602,"ockets":11603,"youtube":11604,"Story":11605,"ategory":11606,"Ġmild":11607,"ailing":11608,"ĠYe":11609,"Orig":11610,"ĠKa":11611,"orig":11612,"Ġpropaganda":11613,"Ġanonymous":11614,"Ġstruggled":11615,"Ġoutrage":11616,"ATED":11617,"ĠBeijing":11618,"rary":11619,"Ġleather":11620,"Ġworlds":11621,"Ġbroader":11622,"125":11623,"idal":11624,"ĠBetter":11625,"Ġtear":11626,"Ext":11627,"Ġproposals":11628,"Ġiter":11629,"ĠSquad":11630,"Ġvolunt":11631,"mi":11632,"Did":11633,"ĠPu":11634,"pin":11635,"Ġspeakers":11636,"Ġborders":11637,"Ġfigured":11638,"='":11639,"Ġsimultaneously":11640,"aeda":11641,"Ġcharging":11642,"Ġurged":11643,"Ġconj":11644,"256":11645,"ĠGordon":11646,"merce":11647,"Ġdocumentary":11648,"Share":11649,"itol":11650,"ONE":11651,"ĠGarden":11652,"hatt":11653,"ĠThompson":11654,"aneous":11655,"apore":11656,"Ġtanks":11657,"Ġlessons":11658,"track":11659,"Ġoutstanding":11660,"Ġvolunteers":11661,"Ġspray":11662,"Ġmanagers":11663,"large":11664,"Ġcamps":11665,"Ġartificial":11666,"ĠRu":11667,"Ġbags":11668,"thal":11669,"Ġcompatible":11670,"ĠBlade":11671,"Ġfed":11672,"Ġargues":11673,"FI":11674,"Ġunfair":11675,"Ġcorn":11676,"Ġoffset":11677,"Ġdirections":11678,"Ġdisappointed":11679,"ĠConvention":11680,"Ġviewing":11681,"ME":11682,"ocity":11683,"Ġtowns":11684,"Ġlayers":11685,"Ġrolled":11686,"Ġjumped":11687,"Ġattribute":11688,"Ġunnecess":11689,"incoln":11690,"Ġsuppose":11691,"ĠNether":11692,"cha":11693,"Ġburied":11694,"Ġsixth":11695,"Ben":11696,"ressing":11697,"OUR":11698,"Ġwound":11699,"Ġcycl":11700,"Ġmechanisms":11701,"Ġcongressional":11702,"ĠElement":11703,"Ġagreements":11704,"Ġdecor":11705,"Ġclosest":11706,"ĠMit":11707,"Google":11708,"}}":11709,"Ġmixture":11710,"Ġfluid":11711,"Sign":11712,"ĠScholar":11713,"Ġpist":11714,"asket":11715,"abling":11716,"Ġracing":11717,"hero":11718,"riel":11719,"assy":11720,"Ġcheaper":11721,"ben":11722,"Ġvertical":11723,"amacare":11724,"ĠReading":11725,"gments":11726,"Ġhelicop":11727,"Ġsacrifice":11728,"aya":11729,"paren":11730,"VA":11731,"ĠLes":11732,"ĠStudio":11733,"Ġviolations":11734,"ĠAnna":11735,"acer":11736,"é¾":11737,"ĠRat":1173
8,"ĠBeck":11739,"ĠDick":11740,"ĠACT":11741,"Ġcomposition":11742,"Ġtexture":11743,"ĠOwn":11744,"Ġsmartphone":11745,"ĠNA":11746,"Ġforb":11747,"import":11748,"Ġdefending":11749,"ilst":11750,"rer":11751,"Ġoh":11752,"ĠJeremy":11753,"Ġbanking":11754,"ceptions":11755,"Ġrespective":11756,"/.":11757,"Ġdrinks":11758,"ĠWi":11759,"Ġbands":11760,"ĠLiverpool":11761,"Ġgrip":11762,"ĠBuy":11763,"Ġopenly":11764,"Ġreviewed":11765,"pert":11766,"Ġverify":11767,"ĠCole":11768,"ĠWales":11769,"MO":11770,"Ġunpre":11771,"Ġshelter":11772,"ĠImperial":11773,"Ġgui":11774,"ĠDak":11775,"Ġsuggestions":11776,"Ġexplicitly":11777,"Ġslave":11778,"Ġblockchain":11779,"Ġcompeting":11780,"Ġpromising":11781,"SON":11782,"Ġsoccer":11783,"Ġconstitution":11784,"429":11785,"Ġdistract":11786,"ĠUser":11787,"esides":11788,"ĠMethod":11789,"ĠTokyo":11790,"Ġaccompanied":11791,"Client":11792,"sur":11793,"alog":11794,"Ġidentification":11795,"Ġinvasion":11796,"asma":11797,"Ġindustries":11798,"ppers":11799,"Ġsubtle":11800,"ĠUnit":11801,"natural":11802,"Ġsurvived":11803,"Ġflaw":11804,"ĺħ":11805,"ĠHoll":11806,"Ġdeficit":11807,"Ġtutorial":11808,"ĠChance":11809,"Ġarguing":11810,"Ġcontemporary":11811,"Ġintegration":11812,"forward":11813,"Ġtum":11814,"itis":11815,"Ġhiding":11816,"ĠDomin":11817,"ĠTan":11818,"ĠBuilding":11819,"ĠVin":11820,"Ġspokesperson":11821,"ĠNotes":11822,"Ġemerging":11823,"Ġpreparation":11824,"Ġprost":11825,"Ġsuspects":11826,"Ġautonom":11827,"Description":11828,"Ġdealt":11829,"ĠPear":11830,"Ġsteady":11831,"Ġdecreased":11832,"Ġsovere":11833,"ĠClin":11834,"Ġgradually":11835,"orses":11836,"ĠWAR":11837,"Serv":11838,"ãĤ¢":11839,"hr":11840,"Ġdirty":11841,"ĠBarn":11842,"ĠBC":11843,"Ġdil":11844,"Ġcalendar":11845,"Ġcompliance":11846,"Ġchamber":11847,"bb":11848,"Ġpassenger":11849,"ateful":11850,"ĠTitle":11851,"ĠSydney":11852,"ĠGot":11853,"Ġdarkness":11854,"Ġdefect":11855,"Ġpacked":11856,"assion":11857,"Ġgods":11858,"Ġharsh":11859,"ICK":11860,"leans":11861,"Ġalgorithm":11862,"Ġoxygen":11863,"Ġvisits":11864,"Ġblade":11865,"Ġkilomet":11866,"ĠKentucky":11867,"Ġkiller":11868,"Pack":11869,"enny":11870,"Ġdivine":11871,"Ġnomination":11872,"being":11873,"Ġengines":11874,"Ġcats":11875,"Ġbuffer":11876,"ĠPhill":11877,"Ġtraff":11878,"AGE":11879,"Ġtongue":11880,"Ġradiation":11881,"erer":11882,"mem":11883,"ĠExplicit":11884,"é¾į":11885,"Ġcouples":11886,"Ġphysics":11887,"ĠMcK":11888,"Ġpolitically":11889,"awks":11890,"ĠBloom":11891,"Ġworship":11892,"eger":11893,"uter":11894,"ĠFO":11895,"Ġmathemat":11896,"Ġsentenced":11897,"Ġdisk":11898,"ĠMarg":11899,"Ġ/*":11900,"PI":11901,"Ġoptional":11902,"Ġbabies":11903,"Ġseeds":11904,"ĠScottish":11905,"Ġthy":11906,"]]":11907,"ĠHitler":11908,"PH":11909,"ngth":11910,"Ġrecovered":11911,"inge":11912,"Ġpowder":11913,"Ġlips":11914,"Ġdesigner":11915,"Ġdisorders":11916,"Ġcourage":11917,"Ġchaos":11918,"\"},{\"":11919,"Ġcarrier":11920,"bably":11921,"High":11922,"ĠRT":11923,"esity":11924,"len":11925,"Ġroutes":11926,"uating":11927,"Fil":11928,"NOT":11929,"wall":11930,"sburgh":11931,"Ġengaging":11932,"ĠJavaScript":11933,"orer":11934,"lihood":11935,"Ġunions":11936,"ĠFederation":11937,"ĠTesla":11938,"Ġcompletion":11939,"ĠTa":11940,"Ġprivilege":11941,"ĠOrange":11942,"Ġneur":11943,"parency":11944,"Ġbones":11945,"Ġtitled":11946,"Ġprosecutors":11947,"ĠME":11948,"Ġengineer":11949,"ĠUniverse":11950,"ĠHig":11951,"nie":11952,"oard":11953,"Ġhearts":11954,"ĠGre":11955,"ussion":11956,"Ġministry":11957,"Ġpenet":11958,"ĠNut":11959,"ĠOw":11960,"ĠXP":11961,"instein":11962,"Ġbulk":11963,"System":11964,"icism":11965,"ĠMarketable":11966,"Ġpreval":11967,"Ġpos
ter":11968,"Ġattending":11969,"urable":11970,"Ġlicensed":11971,"ĠGh":11972,"etry":11973,"ĠTradable":11974,"Ġblast":11975,"à¤":11976,"ĠTitan":11977,"elled":11978,"die":11979,"Have":11980,"ĠFlame":11981,"Ġprofound":11982,"Ġparticipating":11983,"Ġanime":11984,"ĠEss":11985,"Ġspecify":11986,"Ġregarded":11987,"ĠSpell":11988,"Ġsons":11989,"owned":11990,"Ġmerc":11991,"Ġexperimental":11992,"lando":11993,"hs":11994,"ĠDungeon":11995,"inos":11996,"Ġcomply":11997,"ĠSystems":11998,"arth":11999,"Ġseized":12000,"local":12001,"ĠGirls":12002,"udo":12003,"oned":12004,"ĠFle":12005,"Ġconstructed":12006,"Ġhosted":12007,"Ġscared":12008,"actic":12009,"ĠIslands":12010,"ĠMORE":12011,"Ġbless":12012,"Ġblocking":12013,"Ġchips":12014,"Ġevac":12015,"Ps":12016,"Ġcorporation":12017,"Ġox":12018,"Ġlighting":12019,"Ġneighbors":12020,"ĠUb":12021,"aro":12022,"Ġbeef":12023,"ĠUber":12024,"Facebook":12025,"armed":12026,"itate":12027,"ĠRating":12028,"ĠQuick":12029,"Ġoccupied":12030,"Ġaims":12031,"ĠAdditionally":12032,"ĠInterest":12033,"Ġdramatically":12034,"Ġheal":12035,"Ġpainting":12036,"Ġengineers":12037,"MM":12038,"ĠMust":12039,"Ġquantity":12040,"Paul":12041,"Ġearnings":12042,"ĠPosts":12043,"stra":12044,"ãĥ¼ãĥ":12045,"Ġstance":12046,"Ġdropping":12047,"script":12048,"Ġdressed":12049,"Make":12050,"Ġjustify":12051,"ĠLtd":12052,"Ġprompted":12053,"Ġscrut":12054,"Ġspeeds":12055,"ĠGiants":12056,"omer":12057,"ĠEditor":12058,"Ġdescribing":12059,"ĠLie":12060,"mented":12061,"Ġnowhere":12062,"ocaly":12063,"Ġinstruction":12064,"fortable":12065,"Ġentities":12066,"Ġcm":12067,"ĠNatural":12068,"Ġinquiry":12069,"Ġpressed":12070,"izont":12071,"forced":12072,"Ġraises":12073,"ĠNetflix":12074,"ĠSide":12075,"Ġouter":12076,"Ġamongst":12077,"ims":12078,"owski":12079,"Ġclimb":12080,"never":12081,"Ġcombine":12082,"ding":12083,"Ġcompr":12084,"Ġsignificance":12085,"Ġremembered":12086,"ĠNevada":12087,"ĠTel":12088,"ĠScar":12089,"ĠWarriors":12090,"ĠJane":12091,"Ġcoup":12092,"bas":12093,"Ġterminal":12094,",-":12095,"OH":12096,"Ġtension":12097,"Ġwings":12098,"ĠMyster":12099,"����":12100,"ĠUnlike":12101,"valid":12102,"vironments":12103,"ĠAli":12104,"Ġnaked":12105,"books":12106,"ĠMun":12107,"ĠGulf":12108,"Ġdensity":12109,"Ġdimin":12110,"Ġdesperate":12111,"Ġpresidency":12112,"Ġ1986":12113,"hy":12114,"IND":12115,"Ġunlock":12116,"imens":12117,"Ġhandled":12118,"ĠEb":12119,"Ġdisappeared":12120,"Ġgenre":12121,"Ġ1988":12122,"Ġdetermination":12123,"Stream":12124,"iko":12125,"apters":12126,"Ġacknowledge":12127,"Jan":12128,"Ġcapitalism":12129,"Pat":12130,"Ġ2020":12131,"Ġpainful":12132,"Ġcurve":12133,"Ġbombs":12134,"storm":12135,"ĠMetal":12136,"encer":12137,"ĠFig":12138,"ĠAaron":12139,"anches":12140,"Ġinspiration":12141,"Ġexhaust":12142,"tains":12143,"ashi":12144,"Ġdescript":12145,"Ġritual":12146,"ĠChelsea":12147,"Ġpromotion":12148,"ĠHung":12149,"ĠWard":12150,"iva":12151,"ĠET":12152,"Ġtoss":12153,"allow":12154,"ĠFrancis":12155,"Dep":12156,"Ġhappiness":12157,"ĠGlass":12158,"Ġbeta":12159,"Ġstrengthen":12160,"NE":12161,"oa":12162,"Ġbuttons":12163,"ĠMurray":12164,"Ġkicked":12165,"Quest":12166,"ĠTalk":12167,"ĠSeveral":12168,"ĠZero":12169,"Ġdrone":12170,"ulk":12171,"Ġcam":12172,"ĠMobile":12173,"Ġpreventing":12174,"Ġretro":12175,"ĠAx":12176,"Ġcruel":12177,"Ġfloat":12178,".),":12179,"Ġfiling":12180,"ĠGrant":12181,"ĠBor":12182,"Ġrib":12183,"Ġchampionship":12184,"ĠMerc":12185,"Ġstyles":12186,"Ġcake":12187,"Ġbuilds":12188,"ĠSelf":12189,"iox":12190,"Ġepic":12191,"oyd":12192,"Bel":12193,"ĠStew":12194,".(":12195,"ahu":12196,"ĠBeyond":12197,"Ġouts":12198,"Ġsolo":12199,"ĠTree":12200,"Ġprese
rve":12201,"Ġtub":12202,"ARE":12203,"roc":12204,"ĠImpro":12205,"ĠWright":12206,"Ġbund":12207,"Ġtraged":12208,"Ġoccasional":12209,"bian":12210,"Second":12211,"rons":12212,"Ġinteractions":12213,"formed":12214,"sing":12215,"Ġowns":12216,"Ġhockey":12217,"General":12218,"Ġlogical":12219,"Ġexpend":12220,"Ġescal":12221,"ĠGriff":12222,"ĠCrown":12223,"ĠReserve":12224,"Ġstopping":12225,"Ġexcuse":12226,"second":12227,"Ġoperated":12228,"Ġreaches":12229,"ĠMalays":12230,"Ġpollution":12231,"ĠBrooklyn":12232,"Ġdelete":12233,"Ġhash":12234,"Block":12235,"aha":12236,"âĢ³":12237,"Ġshorter":12238,"piece":12239,">>>":13163,"ĠMormon":13164,"tor":13165,"Ġparticles":13166,"ĠBart":13167,"ryption":13168,"Ġadmin":13169,"Ġsquee":13170,"VIDIA":13171,"Ġcreator":13172,"iameter":13173,"icular":13174,"NBC":13175,"Ġgrabbed":13176,"Ġnodd":13177,"Ġrated":13178,"Ġrotation":13179,"Ġgrasp":13180,"Ġexcessive":13181,"ĠEC":13182,"ĠWhit":13183,"Ġinventory":13184,"aults":13185,"ĠFB":13186,"Ġecosystem":13187,"Ġbillions":13188,"Ġventure":13189,"named":13190,"Ġdefender":13191,"oute":13192,"Instead":13193,"irable":13194,"War":13195,"Ġassumption":13196,"Ġbite":13197,"Ġearthqu":13198,"tail":13199,"space":13200,"Ġgifts":13201,"boys":13202,"Ġinevitable":13203,"Ġstructural":13204,"Ġbeneficial":13205,"Ġcompelling":13206,"hole":13207,"ervation":13208,"Ġcoat":13209,"oj":13210,"incarn":13211,"ĠYears":13212,"Ġdetermining":13213,"Ġrhetoric":13214,"Ġboundaries":13215,"Ġwhites":13216,"Ant":13217,"addy":13218,")-":13219,"raham":13220,"etermin":13221,"Ġharvest":13222,"ĠConc":13223,"Ġlaptop":13224,"ĠMatch":13225,"Ġenjoying":13226,"cca":13227,"ollar":13228,"Ġtrips":13229,"Ġaddiction":13230,"ĠSak":13231,"Ġpowered":13232,"Ġcous":13233,"ĠRussians":13234,"iere":13235,"Ġretrie":13236,"quality":13237,"Ġdiffer":13238,"Ġkingdom":13239,"ĠLaur":13240,"ĠCapitol":13241,"Ġconclusions":13242,"ĠAltern":13243,"ĠNav":13244,"Ġtransparent":13245,"BER":13246,"Group":13247,"ĠComplete":13248,"Ġinfer":13249,"Ġintrig":13250,"Ġinsane":13251,"RO":13252,"ophob":13253,"isen":13254,"qual":13255,"Michael":13256,"Ġmuseum":13257,"ĠPope":13258,"Ġreset":13259,"rative":13260,"five":13261,"Ġaggreg":13262,"ittees":13263,"ository":13264,"Ġcarb":13265,"ĠRecord":13266,"Ġdecides":13267,"ĠFix":13268,"Ġexceptions":13269,"ĠCommissioner":13270,"uns":13271,"ĠEnvironmental":13272,"Ġlegendary":13273,"istence":13274,"Ġtunnel":13275,"km":13276,"Ġinsult":13277,"Ġtroll":13278,"Ġshake":13279,"Ġdetention":13280,"ques":13281,"ĠChrome":13282,"ĠFiles":13283,"Ġsubt":13284,"Ġprospects":13285,"Ġprol":13286,"render":13287,"proof":13288,"Ġperformances":13289,"Str":13290,"Ġhref":13291,"ername":13292,"Ġachievement":13293,"Ġfut":13294,"Full":13295,"ĠLeban":13296,"google":13297,"ãĥĪ":13298,"ampa":13299,"Maybe":13300,"Ġprojected":13301,"ĠEmb":13302,"Ġcolleg":13303,"Ġawards":13304,"ĠâĶ":13305,"Gold":13306,"ĠBlake":13307,"ĠRaj":13308,"ifting":13309,"Ġpending":13310,"Ġinstinct":13311,"Ġdevelopments":13312,"Connect":13313,"ĠMand":13314,"ĠWITH":13315,"ĠPhilippines":13316,"profile":13317,"Ġaltogether":13318,"ĠBund":13319,"ĠTD":13320,"oooo":13321,"amped":13322,"iph":13323,"Ġsteam":13324,"Ġoldest":13325,"Ġdetection":13326,"ulpt":13327,"Ġç":13328,"ĠWayne":13329,"2006":13330,"fa":13331,"Ġcircles":13332,"ĠFu":13333,"Ġdonors":13334,"appropriate":13335,"ĠDakota":13336,"jamin":13337,"Ġmotivated":13338,"Ġpurchases":13339,"ĠLouisiana":13340,"ĠSpl":13341,"Ġglobe":13342,"Ġ105":13343,"zip":13344,"call":13345,"Ġdepartments":13346,"Ġsustainable":13347,"105":13348,"ĠOP":13349,"ifiers":13350,"Ġprevented":13351,"Ġincomp":13352,"ĠCommander":1335
3,"Ġdominated":13354,"Ġ»":13355,"Ġinvested":13356,"Ġcomplexity":13357,"Ġincl":13358,"Ġensuring":13359,"Ġrealm":13360,"ync":13361,"ĠIndependent":13362,"rained":13363,"ĠJen":13364,"ĠFlight":13365,"Ġathe":13366,"Ġspeculation":13367,"ĠTE":13368,"ocate":13369,"tic":13370,"Ġplaint":13371,"herry":13372,"Ġtoy":13373,"Ġ111":13374,"Ġplates":13375,"status":13376,"ĠIsa":13377,"Ġdevoted":13378,"Cop":13379,"ĠES":13380,"255":13381,"urrency":13382,"Main":13383,"Ġslaves":13384,"Ġpepper":13385,"Ġquotes":13386,"Ġceiling":13387,"ĠFish":13388,"Ġtransformation":13389,"Ġfraction":13390,"Ġadvantages":13391,"Ġtoile":13392,"Ġstunning":13393,"Ġmoist":13394,"breaking":13395,"si":13396,"ĠLocation":13397,"ĠMedium":13398,"Ġtexts":13399,"Ġugly":13400,"Ġbio":13401,".âĢĶ":13402,"ĠBased":13403,"Ġtrains":13404,"ĠWing":13405,"ĠAncient":13406,"ĠRecords":13407,"ĠHope":13408,"Special":13409,"adesh":13410,"obi":13411,"[/":13412,"Ġtemporarily":13413,"Ver":13414,"hu":13415,"oser":13416,"Ġovernight":13417,"Ġmamm":13418,"ĠTreasury":13419,"ĠVenezuel":13420,"ĠMega":13421,"Ġtar":13422,"Ġexpects":13423,"black":13424,"orph":13425,"\\\\\\\\":13426,"Ġacceptance":13427,"Ġradar":13428,"sis":13429,"Ġjunior":13430,"Ġframes":13431,"Ġobservation":13432,"acies":13433,"Power":13434,"ĠAdvanced":13435,"Mag":13436,"ologically":13437,"ĠMechan":13438,"Ġsentences":13439,"Ġanalysts":13440,"aughters":13441,"forcement":13442,"Ġvague":13443,"Ġclause":13444,"Ġdirectors":13445,"Ġevaluate":13446,"Ġcabinet":13447,"Matt":13448,"ĠClassic":13449,"Ang":13450,"Ġcler":13451,"ĠBuck":13452,"Ġresearcher":13453,"Ġ160":13454,"Ġpoorly":13455,"Ġexperiencing":13456,"ĠPed":13457,"ĠManhattan":13458,"Ġfreed":13459,"Ġthemes":13460,"advant":13461,"Ġnin":13462,"Ġpraise":13463,"104":13464,"ĠLibya":13465,"best":13466,"Ġtrusted":13467,"Ġcease":13468,"Ġdign":13469,"Direct":13470,"Ġbombing":13471,"Ġmigration":13472,"ĠSciences":13473,"Ġmunicipal":13474,"ĠAverage":13475,"Ġglory":13476,"Ġrevealing":13477,"Ġarena":13478,"Ġuncertainty":13479,"Ġbattlefield":13480,"iao":13481,"God":13482,"Ġcinem":13483,"rape":13484,"elle":13485,"apons":13486,"Ġlisting":13487,"Ġwaited":13488,"Ġspotted":13489,"keley":13490,"ĠAudio":13491,"eor":13492,"arding":13493,"idding":13494,"igma":13495,"ĠNeg":13496,"Ġlone":13497,"Ġ----":13498,"exe":13499,"deg":13500,"Ġtransf":13501,"Ġwash":13502,"Ġslavery":13503,"Ġexploring":13504,"ĠWW":13505,"atson":13506,"Ġencl":13507,"lies":13508,"ĠCreek":13509,"Ġwooden":13510,"Manager":13511,"ĠBrand":13512,"ummy":13513,"ĠArthur":13514,"Ġbureaucr":13515,"Ġblend":13516,"arians":13517,"Further":13518,"Ġsupposedly":13519,"Ġwinds":13520,"Ġ1979":13521,"Ġgravity":13522,"Ġanalyses":13523,"ĠTravel":13524,"ĠVeter":13525,"Ġdumb":13526,"Ġalternate":13527,"gal":13528,"Ġconsumed":13529,"Ġeffectiveness":13530,".''":13531,"Ġpaths":13532,"onda":13533,"LA":13534,"ĠStrong":13535,"Ġenables":13536,"Ġescaped":13537,"Ġ\"\"":13538,"Ġ112":13539,"Ġ1983":13540,"Ġsmiled":13541,"Ġtendency":13542,"Fire":13543,"Ġpars":13544,"ĠRoc":13545,"Ġlake":13546,"Ġfitness":13547,"ĠAth":13548,"ĠHorn":13549,"Ġhier":13550,"Ġimpose":13551,"mother":13552,"Ġpension":13553,"icut":13554,"borne":13555,"iciary":13556,"._":13557,"ĠSU":13558,"Ġpolar":13559,"isy":13560,"engu":13561,"itialized":13562,"ATA":13563,"write":13564,"Ġexercises":13565,"ĠDiamond":13566,"otypes":13567,"Ġharmful":13568,"onz":13569,"Ġprinting":13570,"story":13571,"Ġexpertise":13572,"ĠGer":13573,"Ġtragedy":13574,"ĠFly":13575,"Ġdivid":13576,"ampire":13577,"stock":13578,"Mem":13579,"Ġreign":13580,"Ġunve":13581,"Ġamend":13582,"ĠProphet":13583,"Ġmutual":13584,"ĠFac":13585,"Ġ
replacing":13586,"Har":13587,"ĠCircuit":13588,"Ġthroat":13589,"ĠShot":13590,"Ġbatteries":13591,"Ġtoll":13592,"Ġaddressing":13593,"ĠMedicaid":13594,"Ġpupp":13595,"ĠNar":13596,"olk":13597,"Ġequity":13598,"MR":13599,"ĠHispan":13600,"ĠLarge":13601,"mid":13602,"Dev":13603,"Ġexped":13604,"Ġdemo":13605,"ĠMarshall":13606,"ergus":13607,"Ġfiber":13608,"Ġdivorce":13609,"ĠCreate":13610,"Ġslower":13611,"ĠParker":13612,"ĠStudent":13613,"ĠTraining":13614,"Return":13615,"ĠTru":13616,"Ġcub":13617,"ĠReached":13618,"Ġpanic":13619,"Ġquarters":13620,"Ġrect":13621,"Ġtreating":13622,"Ġrats":13623,"ĠChristianity":13624,"oler":13625,"Ġsacred":13626,"Ġdeclare":13627,"ulative":13628,"eting":13629,"Ġdelivering":13630,"estone":13631,"Ġtel":13632,"ĠLarry":13633,"Ġmeta":13634,"accept":13635,"artz":13636,"ĠRoger":13637,"handed":13638,"Ġheader":13639,"Ġtrapped":13640,"ĠCentury":13641,"Ġknocked":13642,"ĠOxford":13643,"Ġsurvivors":13644,"bot":13645,"Ġdemonstration":13646,"Ġdirt":13647,"Ġassists":13648,"OME":13649,"ĠDraft":13650,"ortunate":13651,"folio":13652,"pered":13653,"usters":13654,"gt":13655,"ĠLock":13656,"Ġjudicial":13657,"verted":13658,"Ġsecured":13659,"outing":13660,"ĠBooks":13661,"Ġhosting":13662,"Ġlifted":13663,"length":13664,"Ġjer":13665,"Ġwheels":13666,"ĠRange":13667,"umbnails":13668,"Ġdiagnosis":13669,"tech":13670,"ĠStewart":13671,"ĠPract":13672,"Ġnationwide":13673,"Ġdear":13674,"Ġobligations":13675,"Ġgrows":13676,"Ġmandatory":13677,"Ġsuspicious":13678,"!'":13679,"Apr":13680,"Great":13681,"Ġmortgage":13682,"Ġprosecutor":13683,"Ġeditorial":13684,"ĠKr":13685,"Ġprocessed":13686,"ungle":13687,"Ġflexibility":13688,"Earlier":13689,"ĠCart":13690,"ĠSug":13691,"Ġfocuses":13692,"Ġstartup":13693,"Ġbreach":13694,"ĠTob":13695,"cycle":13696,"ãĢĮ":13697,"rose":13698,"Ġbizarre":13699,"ãĢį":13700,"Ġvegetables":13701,"$$":13702,"Ġretreat":13703,"oshi":13704,"ĠShop":13705,"ĠGround":13706,"ĠStop":13707,"ĠHawaii":13708,"ĠAy":13709,"Perhaps":13710,"ĠBeaut":13711,"uffer":13712,"enna":13713,"Ġproductivity":13714,"Fixed":13715,"control":13716,"Ġabsent":13717,"ĠCampaign":13718,"Green":13719,"Ġidentifying":13720,"Ġregret":13721,"Ġpromoted":13722,"ĠSeven":13723,"Ġeru":13724,"neath":13725,"aughed":13726,"ĠPin":13727,"ĠLiving":13728,"Cost":13729,"omatic":13730,"mega":13731,"ĠNig":13732,"ocy":13733,"Ġinbox":13734,"Ġempire":13735,"Ġhorizont":13736,"Ġbranches":13737,"Ġmetaph":13738,"Active":13739,"edi":13740,"ĠFilm":13741,"ĠSomething":13742,"Ġmods":13743,"incial":13744,"ĠOriginal":13745,"Gen":13746,"Ġspirits":13747,"Ġearning":13748,"Hist":13749,"Ġriders":13750,"Ġsacrific":13751,"MT":13752,"ĠVA":13753,"ĠSalt":13754,"Ġoccupation":13755,"ĠMi":13756,"Ġdisg":13757,"lict":13758,"Ġnit":13759,"Ġnodes":13760,"eem":13761,"ĠPier":13762,"Ġhatred":13763,"psy":13764,"ãĥī":13765,"Ġtheater":13766,"Ġsophisticated":13767,"Ġdefended":13768,"Ġbesides":13769,"Ġthoroughly":13770,"ĠMedicare":13771,"Ġblamed":13772,"arently":13773,"Ġcrying":13774,"FOR":13775,"priv":13776,"Ġsinging":13777,"ĠIl":13778,"Ġcute":13779,"oided":13780,"olitical":13781,"ĠNeuro":13782,"å¤":13783,"Ġdonation":13784,"ĠEagles":13785,"ĠGive":13786,"Tom":13787,"Ġsubstantially":13788,"ĠLicense":13789,"ĠJa":13790,"Ġgrey":13791,"ĠAnimal":13792,"ĠER":13793,"ĠUnd":13794,"Ġkeen":13795,"Ġconclude":13796,"ĠMississippi":13797,"Engine":13798,"ĠStudios":13799,"Press":13800,"overs":13801,"llers":13802,"Ġ350":13803,"ĠRangers":13804,"Ġrou":13805,"erto":13806,"Ep":13807,"issa":13808,"ivan":13809,"Ġseal":13810,"ĠRegist":13811,"display":13812,"Ġweaken":13813,"uum":13814,"ĠCommons":13815,"ĠSay":13816,"Ġcultures":13817
,"Ġlaughed":13818,"Ġslip":13819,"Ġtreatments":13820,"izable":13821,"mart":13822,"ĠRice":13823,"Ġbeast":13824,"Ġobesity":13825,"ĠLaure":13826,"iga":13827,"Which":13828,"holder":13829,"Ġelderly":13830,"Ġpays":13831,"Ġcomplained":13832,"Ġcrop":13833,"Ġproc":13834,"Ġexplosive":13835,"ĠFan":13836,"ĠArsenal":13837,"Author":13838,"eful":13839,"Ġmeals":13840,"Ġ(-":13841,"idays":13842,"Ġimagination":13843,"Ġannually":13844,"Ġms":13845,"asures":13846,"Head":13847,"ikh":13848,"matic":13849,"Ġboyfriend":13850,"ĠComputer":13851,"Ġbump":13852,"Ġsurge":13853,"ĠCraig":13854,"ĠKirk":13855,"Del":13856,"mediate":13857,"Ġscenarios":13858,"ĠMut":13859,"ĠStream":13860,"Ġcompetitors":13861,"ÙĦ":13862,"ĠStanford":13863,"ĠResources":13864,"azed":13865,"bage":13866,"Ġorganis":13867,"ĠRelease":13868,"Ġseparately":13869,"Ġhabits":13870,"Ġmeasurements":13871,"ĠClose":13872,"Ġaccompany":13873,"Ġgly":13874,"Ġtang":13875,"ĠRou":13876,"Ġplugin":13877,"Ġconvey":13878,"ĠChallenge":13879,"oots":13880,"jan":13881,"Ġcurs":13882,"ĠRelations":13883,"keeper":13884,"Ġapproaching":13885,"ping":13886,"Speaking":13887,"Ġarrangement":13888,"ĠVI":13889,"arettes":13890,"Ġaffecting":13891,"Ġpermits":13892,"because":13893,"Ġuseless":13894,"ĠHus":13895,"!!!!":13896,"Ġdestroying":13897,"Unfortunately":13898,"Ġfascinating":13899,"Sem":13900,"Ġelectoral":13901,"Ġtransparency":13902,"ĠChaos":13903,"Ġvolunteer":13904,"Ġstatistical":13905,"Ġactivated":13906,"rox":13907,"Web":13908,"HE":13909,"ĠHampshire":13910,"isive":13911,"Map":13912,"Ġtrash":13913,"ĠLawrence":13914,"stick":13915,"Cr":13916,"Ġrings":13917,"EXT":13918,"Ġoperational":13919,"opes":13920,"Does":13921,"ĠEvans":13922,"Ġwitnessed":13923,"Port":13924,"Ġlaunching":13925,"econom":13926,"wear":13927,"ĠParticip":13928,"umm":13929,"cules":13930,"ĠRAM":13931,"ĠTun":13932,"Ġassured":13933,"Ġbinary":13934,"Ġbetray":13935,"Ġexploration":13936,"ĠFel":13937,"Ġadmission":13938,"itated":13939,"Sy":13940,"Ġavoided":13941,"ĠSimulator":13942,"Ġcelebrated":13943,"ĠElectric":13944,"¥ŀ":13945,"Ġcluster":13946,"itzerland":13947,"health":13948,"Line":13949,"ĠNash":13950,"aton":13951,"Ġspare":13952,"Ġenterprise":13953,"ĠDIS":13954,"cludes":13955,"Ġflights":13956,"Ġregards":13957,"ĠÃĹ":13958,"half":13959,"Ġtrucks":13960,"Ġcontacts":13961,"Ġuncons":13962,"ĠClimate":13963,"Ġimmense":13964,"NEW":13965,"occ":13966,"ective":13967,"Ġembod":13968,"Ġpatrol":13969,"Ġbeside":13970,"Ġviable":13971,"Ġcreep":13972,"Ġtriggered":13973,"verning":13974,"Ġcomparable":13975,"ql":13976,"Ġgaining":13977,"asses":13978,"Ġ();":13979,"ĠGrey":13980,"ĠMLS":13981,"sized":13982,"Ġprosper":13983,"\"?":13984,"Ġpolling":13985,"Ġshar":13986,"ĠRC":13987,"Ġfirearm":13988,"orient":13989,"Ġfence":13990,"Ġvariations":13991,"giving":13992,"ĠPi":13993,"ospel":13994,"Ġpledge":13995,"Ġcure":13996,"Ġspy":13997,"Ġviolated":13998,"Ġrushed":13999,"Ġstroke":14000,"ĠBlog":14001,"sels":14002,"ĠEc":14003,",''":14004,"Ġpale":14005,"ĠCollins":14006,"terror":14007,"ĠCanadians":14008,"Ġtune":14009,"Ġlaboratory":14010,"Ġnons":14011,"tarian":14012,"Ġdisability":14013,"ĠGam":14014,"Ġsinger":14015,"alg":14016,"ĠSenior":14017,"Ġtraded":14018,"ĠWarrior":14019,"Ġinfring":14020,"ĠFranklin":14021,"Ġstrain":14022,"ĠSwedish":14023,"Ġseventh":14024,"ĠBenn":14025,"ĠTell":14026,"Ġsyndrome":14027,"Ġwondered":14028,"iden":14029,"++++":14030,"igo":14031,"Ġpurple":14032,"Ġjournalism":14033,"Ġrebel":14034,"Ġfu":14035,"blog":14036,"Ġinvite":14037,"rencies":14038,"ĠContact":14039,"Israel":14040,"ĠContent":14041,"Ġcheer":14042,"Ġbedroom":14043,"ĠEngineering":14044,"ĠQueens":14045,"Ġ
dwell":14046,"ĠPlayStation":14047,"ĠDim":14048,"ĠColon":14049,"lr":14050,"Ġoperates":14051,"Ġmotivation":14052,"USA":14053,"astered":14054,"Core":14055,"ĠTruth":14056,"olo":14057,"OSE":14058,"ĠMemory":14059,"Ġpredec":14060,"Ġanarch":14061,"Ġ1920":14062,"ĠYam":14063,"è":14064,"bid":14065,"Ġgrateful":14066,"Ġexcitement":14067,"Ġtreasure":14068,"Ġlongest":14069,"ctive":14070,"Ġdeserves":14071,"Ġreserves":14072,"Ġcops":14073,"ĠOttawa":14074,"ĠEgyptian":14075,"anked":14076,"Ġartif":14077,"Ġhypothesis":14078,":/":14079,"Ġpurchasing":14080,"Ġlovely":14081,"HP":14082,"Ġdivide":14083,"Ġstrictly":14084,"Ġquestioning":14085,"Ġtaxpayers":14086,"ĠJoy":14087,"Ġrolls":14088,"ĠHeavy":14089,"Ġports":14090,"Ġmagnetic":14091,"Ġinflamm":14092,"Ġbrush":14093,"tics":14094,"âĪĴ":14095,"Ġbottles":14096,"ppy":14097,"Ġpadd":14098,"ãĤ¯":14099,"million":14100,"Ġdevastating":14101,"Ġcompiled":14102,"Ġmedication":14103,"Ġtwelve":14104,"ĠPerry":14105,"Space":14106,"imb":14107,"your":14108,"Ġleaked":14109,"ĠTar":14110,"Ġunity":14111,"Ġinfected":14112,"Ġtraveled":14113,"IDE":14114,"ĠMcDonald":14115,"txt":14116,"ĠPrinc":14117,"Ġinterven":14118,"ĠTaiwan":14119,"ĠPow":14120,"Ġbearing":14121,"ĠThread":14122,"Ġzones":14123,"izards":14124,"unks":14125,"Chapter":14126,"llor":14127,"Ġ·":14128,"Ġwounds":14129,"Ġdiscretion":14130,"Ġsucceeded":14131,"iking":14132,"Ġiconic":14133,"Call":14134,"Ġscreening":14135,"ĠMis":14136,"icts":14137,"Ġministers":14138,"Ġseparation":14139,"Player":14140,"Ġbip":14141,"Ġbeloved":14142,"Ġcounting":14143,"ĠEye":14144,"around":14145,"inging":14146,"Ġtablet":14147,"Ġoffence":14148,"inance":14149,"have":14150,"ĠInfo":14151,"ĠNinja":14152,"Ġprotective":14153,"ĠCass":14154,"Mac":14155,"ĠQuality":14156,"North":14157,"Ġic":14158,"ĠCuba":14159,"ĠChronicle":14160,"ĠProperty":14161,"Ġfastest":14162,"otos":14163,"ĠGerm":14164,"OWN":14165,"Ġboom":14166,"ĠStanley":14167,"erguson":14168,"Ġclever":14169,"Ġenters":14170,"mode":14171,"terior":14172,"ĠSens":14173,"Ġlinear":14174,"ARK":14175,"Ġcomparing":14176,"Ġpurely":14177,"Ġsafer":14178,"ĠPotter":14179,"Ġcups":14180,"RT":14181,"Ġgluc":14182,"Ġattributed":14183,"Ġdupl":14184,"ĠPap":14185,"Ġprecious":14186,"Ġpa":14187,"ictionary":14188,"ĠTig":14189,"ĠToo":14190,"olutions":14191,"stan":14192,"Ġrobots":14193,"Ġlobb":14194,"Ġstatute":14195,"Ġprevention":14196,"western":14197,"160":14198,"ĠActive":14199,"ĠMaria":14200,"hal":14201,"None":14202,"ellar":14203,"ĠKB":14204,"ĠPartners":14205,"ĠSingle":14206,"ĠFollowing":14207,"ango":14208,"acious":14209,"Ġthou":14210,"Ġkg":14211,"Ġinfluential":14212,"ĠFriends":14213,"Sur":14214,"ainted":14215,"Ġforums":14216,"Ġstarter":14217,"Ġcitizenship":14218,"ĠElection":14219,"onge":14220,"otation":14221,"osph":14222,";;;;":14223,"utical":14224,"pur":14225,"eren":14226,"Ġaccusations":14227,"bitious":14228,"abbit":14229,"ĠOrd":14230,"Posted":14231,"irk":14232,"Ġsensitivity":14233,"iche":14234,"ĠAmy":14235,"ĠFab":14236,"Ġsummit":14237,"Ġpedest":14238,"Ġrubber":14239,"Ġagricultural":14240,"Ġcancel":14241,"AE":14242,"Ġinaug":14243,"Ġcontam":14244,"Ġfirmly":14245,"iw":14246,"stage":14247,"ĠKan":14248,"Ġtier":14249,"Ġinvention":14250,"Ġtranslated":14251,"ĠRules":14252,"Box":14253,"Twitter":14254,"IDS":14255,"Ġpizza":14256,"Ġdebug":14257,"ĠDrop":14258,"vs":14259,"Ġhorses":14260,"big":14261,"Ġboring":14262,"Ġhood":14263,"ĠMcCain":14264,"atched":14265,"ĠBros":14266,"Ġskip":14267,"Ġessay":14268,"stat":14269,"ĠLegends":14270,"Ġammunition":14271,"auc":14272,"Ġshooter":14273,"Ġunh":14274,"Ġsupplied":14275,"Ġgeneric":14276,"ĠSK":14277,"iban":14278,"yrics"
:14279,"Ġ255":14280,"Ġclimbing":14281,"Former":14282,"Ġflip":14283,"Ġjumping":14284,"Ġfrustration":14285,"ĠTerry":14286,"Ġneighborhoods":14287,"Ġmedian":14288,"bean":14289,"Ġbrains":14290,"Following":14291,"Ġshaped":14292,"Ġdraws":14293,"Ġaltered":14294,"Jack":14295,"Ġrecipes":14296,"Ġskilled":14297,"wealth":14298,"achi":14299,"election":14300,"Ġbehaviors":14301,"deals":14302,"ĠUntil":14303,"Fe":14304,"Ġdeclaration":14305,"marks":14306,"ĠBetween":14307,"celona":14308,"Ġreson":14309,"Ġbubble":14310,"Among":14311,"Ġimperial":14312,"GS":14313,"Ġfeminist":14314,"2005":14315,"ĠKyle":14316,"Ġaccounting":14317,"ĠTele":14318,"ĠTyr":14319,"Ġconnecting":14320,"Ġrehab":14321,"ĠPred":14322,"sim":14323,"Ġmeantime":14324,"Ġphysician":14325,"MW":14326,"ĠCampbell":14327,"ĠBrandon":14328,"Ġcontributing":14329,"ĠRule":14330,"ĠWeight":14331,"ĠNap":14332,"Ġinteractive":14333,"Ġvag":14334,"Ġhelmet":14335,"ĠComb":14336,"four":14337,"Ġshipped":14338,"Ġcompleting":14339,"ĠPD":14340,"PDATE":14341,"Ġspreading":14342,"Ġscary":14343,"erving":14344,"ĠGas":14345,"Ġfrank":14346,"school":14347,"Ġromantic":14348,"Ġstabil":14349,"Rob":14350,"Ġaccurately":14351,"Ġacute":14352,"ĠHann":14353,"Ġsymbols":14354,"Ġcivilization":14355,"ĠAW":14356,"Ġlightning":14357,"Ġconsiders":14358,"Ġvenue":14359,"Ġ×":14360,"Ġoven":14361,"ĠSF":14362,"his":14363,"Ġnu":14364,"ĠLearn":14365,"Ġpeoples":14366,"Ġstd":14367,"Ġslee":14368,"Ġslic":14369,"ĠStatistics":14370,"Ġcorners":14371,"ĠBaker":14372,"Ġ:)":14373,"mentation":14374,"olver":14375,"Ġlaughing":14376,"ĠTodd":14377,"onde":14378,"ĠHills":14379,"Ġnuts":14380,"ĠWoman":14381,"plane":14382,"Ġliver":14383,"ĠInside":14384,"Sorry":14385,"Ġagrees":14386,"Ġfundament":14387,"ĠFisher":14388,"Ġauction":14389,"Ġthreads":14390,"glas":14391,"ĠBasic":14392,"ĠNat":14393,"Ġlacking":14394,"Ġcelebration":14395,"ju":14396,"Ġsilly":14397,"Euro":14398,"Ġtatt":14399,"ighty":14400,"controlled":14401,"Test":14402,"ĠSingh":14403,"Ġrage":14404,"Ġrhyth":14405,"offic":14406,"ĠPhantom":14407,"Ġheadlines":14408,"Ġresponding":14409,"ĠMorning":14410,"Ġvitamin":14411,"Ġboots":14412,"ĠSite":14413,"alin":14414,"pi":14415,"Ġviral":14416,"ĠUC":14417,"DER":14418,"ĠSex":14419,"Ġstocks":14420,"current":14421,"Ġchurches":14422,"ĠRare":14423,"ĠMurphy":14424,"Ġdenial":14425,"ĠGaming":14426,"Ġtoug":14427,"Ġnick":14428,"Ġmakers":14429,"ĠRonald":14430,"Ġgenerous":14431,"ĠDoc":14432,"ĠMorris":14433,"Ġtransformed":14434,"ĠNormal":14435,"Ġ104":14436,"ĠKickstarter":14437,"ĠUpon":14438,"Online":14439,"ĠIRS":14440,"Ġwrap":14441,"Ġloving":14442,"Ġarrives":14443,"ĠDue":14444,"Ġheter":14445,"ĠMade":14446,"Ġrental":14447,"Ġbelongs":14448,"Ġattorneys":14449,"Ġcrops":14450,"Ġmatched":14451,"ulum":14452,"oline":14453,"109":14454,"Ġdispar":14455,"Ġbuyers":14456,"ĠCambridge":14457,"Ġethics":14458,"roups":14459,"Ġjustified":14460,"Ġmarginal":14461,"Ġrespected":14462,"winning":14463,"Ġnodded":14464,"ĠSerge":14465,"ĠFormer":14466,"Craft":14467,"################":14468,"ĠWarner":14469,"Ġdash":14470,"ete":14471,"Ġentert":14472,"ĠEscape":14473,"outheast":14474,"Ġknees":14475,"ĠBomb":14476,"Ġrug":14477,"Pass":14478,"Ġattitudes":14479,"government":14480,"ĠPrior":14481,"Ġqualities":14482,"Ġnotification":14483,"ĠPhone":14484,"lie":14485,"Ġanticipated":14486,"ĠCombat":14487,"ĠBarry":14488,"Ġ1982":14489,"Users":14490,"oner":14491,"Ġcomputing":14492,"ĠConnecticut":14493,"Ġlesser":14494,"Ġpeers":14495,"ĠCu":14496,"Ġtechnically":14497,"Ġsubmission":14498,"ĠUniversal":14499,"Ġmanually":14500,"ourge":14501,"Ġrespondents":14502,"ĠBTC":14503,"ĠHost":14504,"Ġfare":14505,"ĠB
ird":14506,"Ġreceipt":14507,"also":14508,"Ġjack":14509,"Ġagriculture":14510,"Ġskull":14511,"Ġ!=":14512,"Ġpassive":14513,"ĠCI":14514,"Ġsocieties":14515,"Ġreminded":14516,"Ġinterference":14517,"Buy":14518,"Ġâľ":14519,"gon":14520,"Ġscrutiny":14521,"ĠWitch":14522,"Ġconducting":14523,"Ġãĥ":14524,"Ġexchanges":14525,"ĠMitchell":14526,"Ġinhabit":14527,"Ġtwist":14528,"BD":14529,"Ġwherever":14530,"groupon":14531,"Ġjokes":14532,"ĠBenjamin":14533,"ĠRandom":14534,"frame":14535,"ĠLions":14536,"Ġhighlighted":14537,"ĠArkansas":14538,"Ent":14539,"Ġpile":14540,"Ġprelim":14541,"gs":14542,"minded":14543,"Ġfelony":14544,"ĠGA":14545,"ĠLuck":14546,"Ġpractically":14547,"ĠBos":14548,"Ġactress":14549,"Dam":14550,"ĠBou":14551,"Ġvisa":14552,"Ġembedded":14553,"Ġhybrid":14554,"Ġearliest":14555,"Ġsooner":14556,"social":14557,"ĠHA":14558,"Ġsteep":14559,"Ġdisadvant":14560,"Ġexploit":14561,"ĠEgg":14562,"ĠUltra":14563,"Ġnecessity":14564,"Local":14565,"iege":14566,"Ġdated":14567,"Ġmasses":14568,"Ġsubscription":14569,"pless":14570,"Ġanonym":14571,"Ġpresumably":14572,"Blue":14573,"Their":14574,"asketball":14575,"ĠPhilip":14576,"Ġcomed":14577,"loaded":14578,"rane":14579,"Ġreflection":14580,"China":14581,"Ġextends":14582,"Ġforming":14583,"Ġunders":14584,"2001":14585,"Ġgrat":14586,"Ġconcentrations":14587,"Ġinsulin":14588,"Ġsecular":14589,"Ġwhilst":14590,"Ġwinners":14591,"Advertisements":14592,"Ġdeliberately":14593,"ĠWorking":14594,"Ġsink":14595,"etics":14596,"dale":14597,"Ġmandate":14598,"Ġgram":14599,"Ġvacation":14600,"Ġwarnings":14601,"ripp":14602,"ĠTHAT":14603,"Ġcommentary":14604,"Ġintu":14605,"Ġaest":14606,"Ġreasoning":14607,"Ġbreakdown":14608,"ĠZombie":14609,"Ġ-->":14610,"ĠPolitical":14611,"cott":14612,"Ġthrust":14613,"Ġtechnological":14614,"Ġdeciding":14615,"Ġtrafficking":14616,"Long":14617,"Welcome":14618,"prising":14619,"ĠCommunications":14620,"Ġendors":14621,"Ġswift":14622,"Ġmetabol":14623,"coins":14624,"resa":14625,"ĠHTTP":14626,"Ġenroll":14627,"ĠHappy":14628,"usr":14629,"intage":14630,"Ġ[\"":14631,"uably":14632,"ĠMaterial":14633,"Ġrepeal":14634,"Sept":14635,"kh":14636,"ĠModi":14637,"Ġunderneath":14638,"ĠIL":14639,"shore":14640,"Ġdiagnosed":14641,"aceutical":14642,"Ġshower":14643,"aux":14644,"ĠSwitch":14645,"ĠStrength":14646,"Ġjihad":14647,"national":14648,"Ġtrauma":14649,"ussy":14650,"oni":14651,"Ġconsolid":14652,"Ġcalories":14653,"ĠFlynn":14654,"agged":14655,"168":14656,"ĠPink":14657,"Ġfulfill":14658,"Ġchains":14659,"Ġnotably":14660,"ĠAV":14661,"Life":14662,"ĠChuck":14663,"mus":14664,"ĠUrban":14665,"ĠHend":14666,"Ġdeposit":14667,"ĠSad":14668,"Ġaffair":14669,"ORK":14670,"ieval":14671,"ĠFDA":14672,"Ġtrop":14673,"ĠOverall":14674,"Ġvirtue":14675,"Ġsatisfaction":14676,"aund":14677,"Ġlun":14678,"ĠSwitzerland":14679,"ĠOperation":14680,"process":14681,"Ġshook":14682,"Ġcounties":14683,"leased":14684,"ĠCharlotte":14685,"112":14686,"Ġtranscript":14687,"Ġredd":14688,"push":14689,"ĠHey":14690,"ĠAnalysis":14691,"[\"":14692,"Ġalternatives":14693,"ardless":14694,"Ġeleph":14695,"Ġprejud":14696,"ĠLeaf":14697,"Having":14698,"ĠHub":14699,"Ġexpressions":14700,"ĠVolume":14701,"Ġshocking":14702,"ĠReds":14703,"Ġreadily":14704,"Ġplanets":14705,"adata":14706,"Ġcollapsed":14707,"ĠMadrid":14708,"Ġirrit":14709,"ipper":14710,"ĠEnc":14711,"ĠWire":14712,"Ġbuzz":14713,"ĠGP":14714,"asha":14715,"Ġaccidentally":14716,"uru":14717,"Ġfrustrated":14718,"ĠSA":14719,"Ġhungry":14720,"ĠHuff":14721,"Ġlabels":14722,"anto":14723,"ĠEP":14724,"Ġbarriers":14725,")|":14726,"ĠBerkeley":14727,"ĠJets":14728,"Ġpairs":14729,"ĠLan":14730,"James":14731,"ĠBear":14732,"Ġhumor":
14733,"ĠLiberty":14734,"Ġmagnitude":14735,"Ġaging":14736,"ĠMason":14737,"Ġfriendship":14738,"umbling":14739,"Ġemerge":14740,"Ġnewspapers":14741,"Ġambitious":14742,"ĠRichards":14743,"aternal":14744,"Ġ1981":14745,"Ġcookies":14746,"Ġsculpt":14747,"Ġpursuit":14748,"Location":14749,"Ġscripts":14750,"pc":14751,"Ġarrangements":14752,"Ġdiameter":14753,"Ġloses":14754,"amation":14755,"Ġliqu":14756,"ĠJake":14757,"arette":14758,"Ġunderstands":14759,"ĠZen":14760,"vm":14761,"Ġapprove":14762,"Ġwip":14763,"Ġultra":14764,"Ġintend":14765,"ĠDI":14766,"ascular":14767,"Ġstays":14768,"ĠKor":14769,"ĠKl":14770,"Ġinvesting":14771,"La":14772,"Ġbelieving":14773,"bad":14774,"mouth":14775,"Ġtaxpayer":14776,"ãĥĥ":14777,"ĠQuebec":14778,"Ġlap":14779,"ĠSwiss":14780,"drop":14781,"Ġdrain":14782,"iri":14783,"etc":14784,"ften":14785,"ĠNex":14786,"Ġstraw":14787,"Ġscreaming":14788,"Ġcounted":14789,"Ġdamaging":14790,"Ġambassador":14791,"century":14792,"Ġprox":14793,"Ġarrests":14794,"uv":14795,"ilateral":14796,"ĠCharg":14797,"Ġprescribed":14798,"Ġindependently":14799,"Ġfierce":14800,"ĠBaby":14801,"Ġbrave":14802,"Ġsuits":14803,"=>":14804,"Ġbaseline":14805,"ĠRate":14806,"Ġislands":14807,"Ġ((":14808,"green":14809,"ixels":14810,"Ġnamely":14811,"ĠVillage":14812,"than":14813,"amy":14814,"Version":14815,"gmail":14816,"entials":14817,"ĠSud":14818,"ĠMelbourne":14819,"Ġarriving":14820,"Ġquantum":14821,"eff":14822,"ropolitan":14823,"Tri":14824,"Ġfuneral":14825,"ĠIR":14826,"ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ":14827,"ĠCob":14828,"itably":14829,"Ġturb":14830,"Ġcombo":14831,"Review":14832,"Ġdeployment":14833,"uity":14834,"ĠBott":14835,"Ġinvisible":14836,"Ġrendering":14837,"Ġunlocked":14838,"Ġaqu":14839,"ĠVladimir":14840,"Ġpad":14841,"ĠBrain":14842,"ĠLegacy":14843,"dragon":14844,"ĠKurdish":14845,"Ġsounded":14846,"Ġdetained":14847,"ĠDM":14848,"gary":14849,"Ġdaughters":14850,"Ġdisturbing":14851,"uka":14852,"ĠParad":14853,"Ġtast":14854,"Ġunfortunate":14855,"Ġul":14856,"emin":14857,"Ġattendance":14858,"trl":14859,"Ġparks":14860,"ĠMemorial":14861,"ĠAlice":14862,"othy":14863,"guard":14864,"ĠDise":14865,"ĠShan":14866,"ĠForum":14867,"Rich":14868,"Ġshifted":14869,"uez":14870,"Ġlighter":14871,"ĠMagn":14872,"Ġcod":14873,"Sch":14874,"hammad":14875,"Pub":14876,"350":14877,"ĠPokemon":14878,"Ġprototype":14879,"Ġunre":14880,"Base":14881,"ĠStudents":14882,"ĠReply":14883,"ĠCommunist":14884,"Ġgau":14885,"ĠTyler":14886,"IZ":14887,"Ġparticipated":14888,"Ġsuprem":14889,"ĠDetails":14890,"Ġvessels":14891,"rod":14892,"Ġtribe":14893,"keep":14894,"Ġassumptions":14895,"Ġpound":14896,"Ġcrude":14897,"ĠAvailable":14898,"Ġswimming":14899,"Ġinclusion":14900,"Ġadvances":14901,"culation":14902,"Ġconservation":14903,"Ġoverd":14904,"ĠBuffalo":14905,"Article":14906,"edge":14907,"Ġawa":14908,"ĠMadison":14909,"Ġsidew":14910,"Ġcatast":14911,"ĠKrist":14912,"ucle":14913,"ĠHighway":14914,"ĠTerror":14915,"Ġactivation":14916,"Ġunconscious":14917,"ĠSatan":14918,"ĠSusan":14919,"illery":14920,"Ġarranged":14921,"iop":14922,"Ġrumors":14923,"urring":14924,"think":14925,"ĠKeith":14926,"ĠKind":14927,"Ġavoiding":14928,"byn":14929,"nut":14930,"ĠSpeaker":14931,"rus":14932,"names":14933,"Ġguilt":14934,"ĠOlympics":14935,"Ġsail":14936,"ĠMes":14937,"levant":14938,"ĠColumbus":14939,"aft":14940,"City":14941,"South":14942,"ĠHarvey":14943,"ĠPun":14944,"Several":14945,"Ġmentally":14946,"Ġimpress":14947,"mount":14948,"ĠUbuntu":14949,"âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ":14950,"ĠSuperman":14951,"ĠMPs":14952,"Ġintentions":14953,"ĠRacing":14954,"Ġlikelihood":14955,"Ġ240":14956,"Total":14957,"Ġtoys":14958,"ĠWatson":14959,"Ġurge"
:14960,"Lear":14961,"ĠPaper":14962,"Ġoccurring":14963,"ĠBeng":14964,"ĠCert":14965,"Ġstones":14966,"Tim":14967,"ĠTwin":14968,"zb":14969,"ĠDynam":14970,"Ġpolitician":14971,"kens":14972,"ĠEnterprise":14973,"UTERS":14974,"Ġabol":14975,"Ġrefresh":14976,"Ġarbitrary":14977,"pection":14978,"Ġtroubles":14979,"Ġ});":14980,"tv":14981,"Ġpilots":14982,"Ġdistribute":14983,"Ġaudit":14984,"Ġpause":14985,"original":14986,"Ġrivals":14987,"£":14988,"Fig":14989,"TL":14990,"abil":14991,"rying":14992,"Lin":14993,"ioned":14994,"lon":14995,"Ġfancy":14996,"Ġcrashed":14997,"Ġtract":14998,"Ġshed":14999,"Ġconsume":15000,"Based":15001,"download":15002,"init":15003,"Ġvoltage":15004,"Introdu":15005,"Ġcondemned":15006,"ĠFinance":15007,"respect":15008,"Ġexcluded":15009,"Ġestablishing":15010,"heric":15011,"Ġheritage":15012,"Ġspectacular":15013,"Ġunst":15014,"ĠSnowden":15015,"ĠLane":15016,"San":15017,"Ġprotections":15018,"struction":15019,"incinn":15020,"Ġmacro":15021,"Custom":15022,"iosity":15023,"Ġesp":15024,"Ġfunctioning":15025,"Ġmush":15026,"Ġpuzzle":15027,"Ġethical":15028,"Mal":15029,"Ġgoverning":15030,"ĠFerguson":15031,"Ġrestored":15032,"Ġstressed":15033,"ĠCounter":15034,"ĠKas":15035,"clip":15036,"ANS":15037,"Ġseiz":15038,"UK":15039,"byss":15040,"oldown":15041,"api":15042,"Ġpermanently":15043,"ounters":15044,"West":15045,"Through":15046,"Light":15047,"atoes":15048,"Ġneat":15049,"Ġcord":15050,"urer":15051,"Ġseverely":15052,"ĠAven":15053,"Ġinterrog":15054,"Ġtriple":15055,"Given":15056,"Number":15057,"Ġarise":15058,"Ġsher":15059,"plant":15060,"Ġflower":15061,"ĠCou":15062,"Ġate":15063,"Ġnewer":15064,"bul":15065,"Ġmeanwhile":15066,"ĠLair":15067,"Ġadjustment":15068,"ĠCopyright":15069,"Ġdivers":15070,"iological":15071,"Ġgamers":15072,"oat":15073,"Ġhistorically":15074,"Ġanalog":15075,"Ġlongtime":15076,"Ġprescription":15077,"ĠMist":15078,"ĠHyper":15079,"ĠMaine":15080,"ĠDeity":15081,"Ġmultipl":15082,"ĠReincarn":15083,"ĠHyd":15084,"ĠPic":15085,"Sil":15086,"rants":15087,"ĠCris":15088,".;":15089,"({":15090,"ependence":15091,"Ġrecy":15092,"ateur":15093,"Ġquad":15094,"Ġglob":15095,"Ġconced":15096,"team":15097,"Ġcapitalist":15098,"ĠLot":15099,"Ġroyal":15100,"ĠCyber":15101,"Ġblacks":15102,"metic":15103,"riv":15104,"ĠDanny":15105,"Ġspo":15106,"ĠRO":15107,"Ġanimated":15108,"rypted":15109,"ĠDeputy":15110,"Ġrendered":15111,"FE":15112,"Ġstreak":15113,"Ġclouds":15114,"ĠDoug":15115,"~~~~~~~~":15116,"Ġdiscour":15117,"ĠVeh":15118,"Ġpsychology":15119,"ĠJourney":15120,"Ġcrystal":15121,"ĠFrost":15122,"Ġsuspicion":15123,"Ġrelate":15124,"orus":15125,"ĠCrypt":15126,"ĠNVIDIA":15127,"comed":15128,"uting":15129,"incinnati":15130,"Ġvulnerability":15131,"ostic":15132,"Ġisolation":15133,"Ġcooling":15134,"ĠCoalition":15135,"Ġ119":15136,"Four":15137,"ĠDeal":15138,"Ġâī":15139,"semble":15140,"rament":15141,"ĠBarcelona":15142,"Ġ102":15143,"Ġcocaine":15144,"ocalypse":15145,"Feb":15146,"ogenic":15147,"Ġmutation":15148,"Ġcryptoc":15149,"ĠKel":15150,"ĠGit":15151,"ais":15152,"Ġsisters":15153,"ANK":15154,"Ġactivate":15155,"Ter":15156,"Ġdread":15157,"ylon":15158,"Ġpropri":15159,"Aust":15160,"ĠDefault":15161,"Ġoutdoor":15162,"Ġsheer":15163,"ceive":15164,"Ġgently":15165,"о":15166,"Program":15167,"ĠâĨĴ":15168,"Ġvegan":15169,"ĠCrus":15170,"Ġresponsibilities":15171,"ĠHR":15172,"OLD":15173,"Ġprevents":15174,"Ġstiff":15175,"ĠWere":15176,"Ġathletic":15177,"ĠScore":15178,"Ġ):":15179,"Ġcolumns":15180,"ĠLoc":15181,"available":15182,"ĠFram":15183,"ĠSessions":15184,"Ġcompanion":15185,"Ġpacks":15186,"140":15187,"ĠKnights":15188,"Ġfart":15189,"Ġstreams":15190,"Ġshore":15191,"Ġappeal
s":15192,"ĠPerformance":15193,"haul":15194,"ĠStra":15195,"ĠNag":15196,"103":15197,"ĠTransportation":15198,"BB":15199,"Ev":15200,"zan":15201,"Public":15202,"Ġtwin":15203,"ulsion":15204,"Mult":15205,"Ġelectro":15206,"Ġstatue":15207,"ationally":15208,"ĠNort":15209,"Ġinspection":15210,"/*":15211,"igue":15212,"Ġcompassion":15213,"ĠTales":15214,"ĠStein":15215,"ĠScreen":15216,"ĠBug":15217,"ĠLion":15218,"girl":15219,"Ġwithdrawal":15220,"Ġobjectives":15221,"Ġbloody":15222,"Ġpreliminary":15223,"Ġjacket":15224,"Ġdimensions":15225,"ĠCool":15226,"ĠOccup":15227,"Ġwreck":15228,"Ġdoubled":15229,"anking":15230,"Ġ1975":15231,"Ġglasses":15232,"ĠWang":15233,"prov":15234,"Path":15235,"connected":15236,"ĠMulti":15237,"ĠNorway":15238,"agonist":15239,"Ġfeared":15240,"Ġtouching":15241,"Ġarguably":15242,"¯¯¯¯¯¯¯¯":15243,"ĠNCAA":15244,"chem":15245,"Ġspat":15246,"ĠWWE":15247,"ĠCel":15248,"igger":15249,"Ġattacker":15250,"ĠJoin":15251,"object":15252,"etta":15253,"Ġeliminated":15254,"det":15255,"Ġdestruct":15256,"ĠLucas":15257,"ctuary":15258,"180":15259,"ĠBrady":15260,"ĠBlues":15261,"Bay":15262,"aukee":15263,"Ġtimeline":15264,"Ġdelegates":15265,"written":15266,"ufficient":15267,"Ġshapes":15268,"Copyright":15269,"ouble":15270,"service":15271,"Ġpione":15272,"Ġcolleges":15273,"Ġrows":15274,"Ġspite":15275,"Ġassessed":15276,"360":15277,"Ġlease":15278,"Ġconfidential":15279,"cker":15280,"ĠManning":15281,"ĠVoice":15282,"Ġsealed":15283,"Ġcalculate":15284,"NO":15285,"ĠAssistant":15286,"Ġteenager":15287,"ulent":15288,"atherine":15289,"Ġmock":15290,"Ġdiamond":15291,"Ġfest":15292,"Ġswitched":15293,"Ġresume":15294,"ĠPuerto":15295,"Ġlanes":15296,"iration":15297,"ĠSimilarly":15298,"Ġrod":15299,"ĠSel":15300,"ĠPalace":15301,"ĠLimited":15302,"eous":15303,"Ġvariant":15304,"Ġward":15305,"Ġ))":15306,"Show":15307,"OOK":15308,"Alex":15309,"ĠNep":15310,"bris":15311,"ĠWikipedia":15312,"Ġexceptional":15313,"Ġmanages":15314,"ĠDraw":15315,"Again":15316,"Ġcopper":15317,"utt":15318,"Ġexports":15319,"Ġportfolio":15320,"Ġelevated":15321,"Rated":15322,"ĠOtherwise":15323,"ĠTact":15324,"ĠShel":15325,"ĠTX":15326,"\"âĢĶ":15327,"Ġresur":15328,"ĠWa":15329,"venant":15330,"Ġmonetary":15331,"people":15332,"Email":15333,"Ġfifty":15334,"ĠSweet":15335,"ĠMalaysia":15336,"Ġconfusing":15337,"ĠRio":15338,"uda":15339,"utenant":15340,"\");":15341,"Ġpraised":15342,"Ġvolumes":15343,"turn":15344,"Ġmature":15345,"Ġnonprofit":15346,"Ġpassionate":15347,"ĠPrivate":15348,"Ġ103":15349,"Ġdescend":15350,"ç¥ŀ":15351,"uffy":15352,"headed":15353,"Whether":15354,"rien":15355,"zech":15356,"beit":15357,"Ġchrom":15358,"ĠMcM":15359,"Ġdancing":15360,"Ġeleg":15361,"ĠNoticed":15362,"115":15363,"Ġadvocacy":15364,"ENTS":15365,"ambling":15366,"ĠMinor":15367,"ĠFinn":15368,"Ġpriorities":15369,"Ġthereof":15370,"ĠStage":15371,"ĠRogers":15372,"Ġsubstitute":15373,"ĠJar":15374,"ĠJefferson":15375,"Ġlightly":15376,"102":15377,"ĠLisa":15378,"uits":15379,"ysical":15380,"Ġshifts":15381,"Ġdrones":15382,"Ġworkplace":15383,"Ġresid":15384,"ensed":15385,"ahn":15386,"Ġpreferences":15387,"server":15388,"Ġdebates":15389,"doc":15390,"ĠGods":15391,"Ġhelicopter":15392,"Ġhonour":15393,"Ġconsiderably":15394,"eded":15395,"ĠFemale":15396,"ĠAnne":15397,"Ġreun":15398,"ĠFace":15399,"ĠHallow":15400,"ĠBudget":15401,"Ġcondemn":15402,"Ġtender":15403,"Prof":15404,"ocratic":15405,"ĠTurner":15406,"ĠAgric":15407,"Ġ1976":15408,"Ġapt":15409,"disc":15410,"ĠFighter":15411,"ĠAur":15412,"Ġgarbage":15413,"input":15414,"ĠKarl":15415,"ĠOliver":15416,"ĠLanguage":15417,"kn":15418,"Non":15419,"ĠClar":15420,"Ġtraditions":15421,"Ġadvertisement":15
422,"ĠSor":15423,"Ġarchive":15424,"Ġvillages":15425,"750":15426,"Ġimplementing":15427,"waukee":15428,"Ġdietary":15429,"Ġswitching":15430,"Republic":15431,"Ġvelocity":15432,"Ġcit":15433,"ĠAwards":15434,"Ġfinancing":15435,"Ġlasted":15436,")]":15437,"Ġreminder":15438,"Person":15439,"Ġprecision":15440,"Ġdesigners":15441,"ĠFried":15442,"ĠBorder":15443,"Ġtragic":15444,"Ġwield":15445,"Ġinitiatives":15446,"ĠTank":15447,"wer":15448,"Ġjoins":15449,"Ro":15450,"inery":15451,"Ġarrow":15452,"Ġgenerating":15453,"founder":15454,"Ġsearches":15455,"Ġrandomly":15456,"Access":15457,"Ġbatch":15458,"Ġposed":15459,"lat":15460,"Ġpursuing":15461,"asa":15462,"Ġtestified":15463,"forming":15464,"ĠShar":15465,"wiki":15466,"ĠEither":15467,"Sometimes":15468,"Ġsenators":15469,"ĠJohnny":15470,"ĠTaliban":15471,"ĠGPS":15472,"\":\"/":15473,"ãģ®å":15474,"Ġanalyzed":15475,"ĠRubio":15476,"ĠMovement":15477,"opard":15478,"iii":15479,"Stand":15480,"fight":15481,"Ġignoring":15482,"iang":15483,"ĠGN":15484,"soever":15485,"ĠSTAT":15486,"Ġrefusing":15487,"Ġsweat":15488,"Ġbay":15489,"PORT":15490,"irmed":15491,"aky":15492,"Ġdispro":15493,"Ġlabeled":15494,"Ġ108":15495,"Hello":15496,"Ġpleasant":15497,"aba":15498,"Ġtriumph":15499,"Ġaboard":15500,"Ġincom":15501,"ĠCrow":15502,"lett":15503,"Ġfolk":15504,"Ġchase":15505,"``":15506,"ĠBrus":15507,"Ġteens":15508,"cue":15509,"Ġterrain":15510,"hyd":15511,"ilight":15512,"ORY":15513,"Support":15514,"ews":15515,"lli":15516,"raints":15517,"ĠCand":15518,"Ġabused":15519,"achment":15520,"larg":15521,"Bas":15522,"ĠCancer":15523,"Ġ1978":15524,"Ġsupporter":15525,"access":15526,"ĠTermin":15527,"ĠTampa":15528,"ĠANY":15529,"Ġnewest":15530,"ĠCriminal":15531,"edu":15532,"Ġ1930":15533,"Ġadmits":15534,"Ġende":15535,"Ġfailures":15536,"urate":15537,"fulness":15538,"cycl":15539,"ĠSubject":15540,"Ġinfinite":15541,"three":15542,"WA":15543,"pit":15544,"ĠInstall":15545,"Rad":15546,"iliation":15547,"GM":15548,"Ġcontinent":15549,"Ġaccommodate":15550,"ĠClay":15551,"Ġpup":15552,"ĠFunction":15553,"Ġhammer":15554,"ĠAlberta":15555,"Ġrevised":15556,"Ġminorities":15557,"Ġmeasurement":15558,"Connell":15559,"Ġdisable":15560,"ĠMix":15561,"Incre":15562,"Ġfork":15563,"ĠRosen":15564,"Ġimplies":15565,"umblr":15566,"ANG":15567,"Ġproteins":15568,"Ġaggression":15569,"Ġfacilitate":15570,"SN":15571,"Ġillegally":15572,"uer":15573,"Ġacadem":15574,"Ġpuzz":15575,"ĠShift":15576,"pay":15577,"ollo":15578,"Ġaudiences":15579,"Build":15580,"Ġnoble":15581,"Ġsyntax":15582,"âĺħ":15583,"Ġbeam":15584,"ĠBed":15585,"ĠAld":15586,"Ġorigins":15587,"video":15588,"Ġ1977":15589,"ĠAssault":15590,"Ġgarage":15591,"Team":15592,"Ġverdict":15593,"Ġdwar":15594,"ĠVirtual":15595,"event":15596,"Keep":15597,"Ġsentiment":15598,"Ġwildlife":15599,"shirt":15600,"Ġburg":15601,"Ġrecommendation":15602,"represent":15603,"Ġgallery":15604,"owners":15605,"Ġscholar":15606,"Ġconvenience":15607,"ĠSwift":15608,"Ġconvinc":15609,"Cap":15610,"Ġwarfare":15611,"ĠVisual":15612,"Ġconstitute":15613,"Ġabort":15614,"ĠWeather":15615,"ĠLooking":15616,"ĠHem":15617,"Ġmartial":15618,"Ġincoming":15619,"etition":15620,"Ġtolerance":15621,"ĠCreated":15622,"Ġflows":15623,"ĠElder":15624,"Ġsouls":15625,"Ġfoul":15626,"ĠPain":15627,"ĠCAN":15628,"Ġ220":15629,"bc":15630,"hend":15631,"Ġgenius":15632,"Real":15633,"ĠWr":15634,"ometer":15635,"pad":15636,"Ġlimiting":15637,"ĠSi":15638,"ĠLore":15639,"ĠAdventures":15640,"Ġvaried":15641,"Disc":15642,"fin":15643,"ĠPersonal":15644,"Chris":15645,"Ġinvented":15646,"Ġdive":15647,"ĠRise":15648,"Ġoz":15649,"ĠComics":15650,"Ġexpose":15651,"ĠReb":15652,"letters":15653,"site":15654,"imat
ed":15655,"Ġhacking":15656,"Ġeducated":15657,"ĠNobody":15658,"Ġdepri":15659,"Ġincentive":15660,"ãĤ·":15661,"Ġoversight":15662,"Ġtribes":15663,"ĠBelgium":15664,"Ġlicensing":15665,"ourt":15666,"Product":15667,"ahl":15668,"ĠGem":15669,"Ġspecialist":15670,"Ġcra":15671,"anners":15672,"ĠCorbyn":15673,"Ġ1973":15674,"READ":15675,"Ġsummar":15676,"Ġoverlook":15677,"ĠApplication":15678,"Ġinappropriate":15679,"Ġdownloaded":15680,"Que":15681,"ĠBears":15682,"Ġthumb":15683,"ĠCharacter":15684,"ĠReincarnated":15685,"ĠSid":15686,"Ġdemonstrates":15687,"sky":15688,"ĠBloomberg":15689,"ĠArray":15690,"ĠResults":15691,"ĠFourth":15692,"ĠEDT":15693,"ĠOscar":15694,"cend":15695,"Ġ106":15696,"ĠNULL":15697,"ĠHERE":15698,"match":15699,"ĠBrun":15700,"Ġglucose":15701,"ieg":15702,"egu":15703,"Ġcertified":15704,"Ġrelie":15705,"Ġhumanitarian":15706,"Ġprayers":15707,"King":15708,"Ġnan":15709,"hou":15710,"108":15711,"ulu":15712,"Ġrenewable":15713,"Ġdistinguish":15714,"Ġdense":15715,"ĠVent":15716,"ĠPackage":15717,"ĠBoss":15718,"Ġeditors":15719,"Ġmigr":15720,"Tra":15721,"ĠPeters":15722,"ĠArctic":15723,"2004":15724,"ĠCape":15725,"Ġlocally":15726,"Ġlasting":15727,"Ġhandy":15728,".).":15729,"Pan":15730,"ĠRES":15731,"Index":15732,"Ġtensions":15733,"Ġformerly":15734,"Ġideological":15735,"Ġsensors":15736,"Ġdealers":15737,"Ġdefines":15738,"Sk":15739,"Ġproceeds":15740,"Ġproxy":15741,"azines":15742,"ĠBash":15743,"ĠPad":15744,"ĠCraft":15745,"ealous":15746,"Ġsheets":15747,"ometry":15748,"June":15749,"clock":15750,"TT":15751,"ĠTheatre":15752,"ĠBuzz":15753,"Ġchapters":15754,"Ġmillenn":15755,"Ġdough":15756,"ĠCongressional":15757,"Ġimagined":15758,"avior":15759,"Ġclinic":15760,"Ġ1945":15761,"Ġholder":15762,"root":15763,"olester":15764,"Ġrestart":15765,"BN":15766,"ĠHamas":15767,"ĠJob":15768,"Ġorb":15769,"Ġram":15770,"Ġdisclose":15771,"Ġtranslate":15772,"Ġimmigrant":15773,"Ġannoying":15774,"Ġtreaty":15775,"anium":15776,"ĠTea":15777,"ĠLegion":15778,"Ġcrowds":15779,"ĠBec":15780,"ĠAer":15781,"ohyd":15782,"Bro":15783,"Looking":15784,"Ġlbs":15785,"Ġaggress":15786,"Ġseam":15787,"Ġintercept":15788,"ĠMI":15789,"mercial":15790,"activ":15791,"ĠCit":15792,"Ġdimension":15793,"Ġconsistency":15794,"Ġrushing":15795,"ĠDouglas":15796,"Ġtrim":15797,"Install":15798,"icker":15799,"Ġshy":15800,"106":15801,"Ġmentions":15802,"pelled":15803,"ĠTak":15804,"cost":15805,"Ġclassroom":15806,"Ġfortune":15807,"driven":15808,"Ġunle":15809,"ĠWheel":15810,"Ġinvestor":15811,"ĠMasters":15812,"kit":15813,"Ġassociations":15814,"ĠEvolution":15815,"oping":15816,"uscript":15817,"Ġprovincial":15818,"ĠWalter":15819,"avi":15820,"SO":15821,"Ġunlimited":15822,"English":15823,"ĠCards":15824,"ĠEbola":15825,"nered":15826,"Ġrevenge":15827,"Ġoutright":15828,"umper":15829,"Ġfitting":15830,"ĠSolid":15831,"Ġformally":15832,"Ġproblematic":15833,"Ġhazard":15834,"Ġencryption":15835,"Ġstraightforward":15836,"ĠAK":15837,"Ġpse":15838,"ĠOrb":15839,"ĠChamber":15840,"ĠMak":15841,"Contents":15842,"Ġloyalty":15843,"Ġlyrics":15844,"ĠSym":15845,"Ġwelcomed":15846,"Ġcooked":15847,"Ġmonop":15848,"Ġnurse":15849,"Ġmisleading":15850,"Ġeternal":15851,"Ġshifting":15852,"Ġ+=":15853,"Vis":15854,"Ġinstitutional":15855,"illary":15856,"Ġpant":15857,"VERT":15858,"ĠACC":15859,"ĠEnh":15860,"Ġincon":15861,"ĠREUTERS":15862,"Ġdonated":15863,"âĢ¦âĢ¦âĢ¦âĢ¦":15864,"Intern":15865,"Ġexhibit":15866,"Ġtire":15867,"ĠRic":15868,"ĠChampion":15869,"ĠMuhammad":15870,"NING":15871,"ĠSoccer":15872,"Ġmobility":15873,"Ġvarying":15874,"ĠMovie":15875,"Ġlord":15876,"oak":15877,"Field":15878,"Ġvector":15879,"usions":15880,"Ġscrap":15881,"Ġenabling":158
82,"make":15883,"Tor":15884,".*":15885,"||":15886,"ĠWebsite":15887,"ĠNPC":15888,"Ġsocialist":15889,"ĠBilly":15890,"ĠAdditional":15891,"Ġcargo":15892,"Ġfarms":15893,"ĠSoon":15894,"ĠPrize":15895,"Ġmidnight":15896,"Ġ900":15897,"seen":15898,"ĠSpot":15899,"Ġsheep":15900,"Ġsponsored":15901,"ĠHi":15902,"ĠJump":15903,"Ġ1967":15904,"Microsoft":15905,"ĠAgent":15906,"Ġcharts":15907,"dir":15908,"Ġadjacent":15909,"Ġtricks":15910,"Ġmanga":15911,"Ġexagger":15912,"/>":15913,"football":15914,"ĠFCC":15915,"GC":15916,"ĠTier":15917,"andra":15918,"OUND":15919,"%),":15920,"Ġfruits":15921,"VC":15922,"ĠAA":15923,"Rober":15924,"Ġmidst":15925,"âĹ":15926,"anka":15927,"Ġlegislature":15928,"ĠNeil":15929,"Ġtourists":15930,"\"\"":15931,"ĠWarning":15932,"ĠNevertheless":15933,"ĠOfficial":15934,"ĠWhatever":15935,"Ġmold":15936,"Ġdrafted":15937,"Ġsubstances":15938,"Ġbreed":15939,"Ġtags":15940,"ĠTask":15941,"Ġverb":15942,"Ġmanufactured":15943,"comments":15944,"ĠPolish":15945,"Prov":15946,"Ġdetermines":15947,"Obama":15948,"kers":15949,"Ġutterly":15950,"Ġsect":15951,"sche":15952,"ĠGates":15953,"ĠChap":15954,"Ġaluminum":15955,"Ġzombie":15956,"ĠTouch":15957,"ĠUP":15958,"Ġsatisfy":15959,"Ġpredomin":15960,"ascript":15961,"Ġelaborate":15962,"Ġ1968":15963,"Ġmeasuring":15964,"ĠVari":15965,"anyahu":15966,"Ġsir":15967,"ulates":15968,"idges":15969,"ickets":15970,"ĠSpencer":15971,"TM":15972,"oubted":15973,"Ġprey":15974,"Ġinstalling":15975,"ĠCab":15976,"reed":15977,"reated":15978,"Supp":15979,"Ġwrist":15980,"ĠKerry":15981,"107":15982,"ĠKle":15983,"ĠRachel":15984,"Ġcotton":15985,"ĠARE":15986,"ĠEle":15987,"Control":15988,"Ġloads":15989,"ĠDod":15990,"anas":15991,"bone":15992,"Ġclassical":15993,"ĠRegional":15994,"ĠInteg":15995,"VM":15996,"Ġdesires":15997,"Ġautism":15998,"supported":15999,"ĠMessage":16000,"Ġcompact":16001,"writer":16002,"Ġ109":16003,"ĠHurricane":16004,"cision":16005,"Ġcycles":16006,"Ġdrill":16007,"Ġcolleague":16008,"Ġmaker":16009,"German":16010,"Ġmistaken":16011,"Sun":16012,"ĠGay":16013,"Ġwhatsoever":16014,"Ġsells":16015,"ĠAirl":16016,"liv":16017,"ĠOption":16018,"Ġsolved":16019,"Ġsectors":16020,"Ġhorizontal":16021,"Ġequation":16022,"ĠSkill":16023,"ĠBio":16024,"gement":16025,"ĠSnap":16026,"ĠLegal":16027,"Ġtrademark":16028,"Ġmakeup":16029,"Ġassembled":16030,"Ġsaves":16031,"ĠHalloween":16032,"ĠVermont":16033,"ĠFROM":16034,"Ġfarming":16035,"ĠPodcast":16036,"acceptable":16037,"ĠHigher":16038,"Ġasleep":16039,"ullivan":16040,"Ġreferen":16041,"ĠLev":16042,"Ġbullets":16043,"oko":16044,"HC":16045,"Ġstairs":16046,"Ġmaintains":16047,"ĠLower":16048,"ĠVi":16049,"Ġmarine":16050,"Ġacres":16051,"Ġcoordinator":16052,"ĠJoh":16053,"Ġcounterparts":16054,"ĠBrothers":16055,"Ġindict":16056,"bra":16057,"Ġchunk":16058,"Ġcents":16059,"Home":16060,"ĠMonth":16061,"Ġaccordingly":16062,"ifles":16063,"ĠGermans":16064,"ĠSyn":16065,"Hub":16066,"Ġeyeb":16067,"âĶĢâĶĢâĶĢâĶĢ":16068,"Ġranges":16069,"ĠHolland":16070,"ĠRobot":16071,"fc":16072,"Mike":16073,"Ġplasma":16074,"Ġswap":16075,"Ġathlete":16076,"ĠRams":16077,",'\"":16078,"Ġinfections":16079,"Ġcorrid":16080,"Ġvib":16081,"Ġpatches":16082,"Ġtraditionally":16083,"Ġrevelation":16084,"Ġsweep":16085,"Ġglance":16086,"Ġinex":16087,"2003":16088,"ĠRaw":16089,"working":16090,"osures":16091,"ĠDat":16092,"ĠLynch":16093,"Ġleverage":16094,"ĠReid":16095,"Ġcorrelation":16096,"iances":16097,"avascript":16098,"Ġrepository":16099,"retty":16100,"Ġ1972":16101,"240":16102,"Ġoun":16103,"pol":16104,"ĠReed":16105,"Ġtactical":16106,"isite":16107,"Apple":16108,"ĠQuinn":16109,"Ġraped":16110,"illo":16111,"Europe":16112,"Ġalgorithms":16113,"ĠR
odrig":16114,"iu":16115,"Ġillum":16116,"Ġfame":16117,"Ġintroducing":16118,"Ġdelays":16119,"ĠRaiders":16120,"Ġwhistle":16121,"Ġnovels":16122,"ĠReally":16123,"Ġderiv":16124,"Ġpublications":16125,"ĠNeither":16126,"ĠCommerce":16127,"Ġaston":16128,"language":16129,"Notes":16130,"ĠRoth":16131,"ĠFear":16132,"Ġmate":16133,"Ġparade":16134,"ĠQB":16135,"Ġmaneu":16136,"ĠCincinnati":16137,"mitting":16138,"Ġwaist":16139,"ĠRew":16140,"Ġdiscont":16141,"а":16142,"Ġstaring":16143,"Ġalias":16144,"Ġsecurities":16145,"Ġtoilet":16146,"ĠJedi":16147,"Ġunlaw":16148,"vised":16149,"////////":16150,"](":16151,"ĠWeiss":16152,"Ġprest":16153,"ĠCompan":16154,"Ġmemo":16155,"ĠGrace":16156,"July":16157,"ĠElite":16158,"center":16159,"ĠStay":16160,"Ġgalaxy":16161,"Ġtooth":16162,"ĠSettings":16163,"Ġsubjected":16164,"ãĤ¦":16165,"Ġlineback":16166,"Ġretailers":16167,"ĠWant":16168,"Ġdangers":16169,"Air":16170,"Ġvoluntary":16171,"eway":16172,"Ġinterpreted":16173,"otine":16174,"ç":16175,"Ġpel":16176,"Service":16177,"ĠEventually":16178,"Ġcareers":16179,"Ġthreaten":16180,"Ġmemor":16181,"ĠBradley":16182,"ancies":16183,"sn":16184,"ĠUnknown":16185,"National":16186,"Ġshadows":16187,"ailand":16188,"ĠDash":16189,"Everyone":16190,"izzard":16191,"March":16192,"=(":16193,"Ġpulls":16194,"Ġstranger":16195,"Ġbackwards":16196,"ĠBernard":16197,"imensional":16198,"Ġchron":16199,"Ġtheoretical":16200,"ktop":16201,"Ġware":16202,"ĠInvestig":16203,"ĠIniti":16204,"ĠOperations":16205,"oven":16206,"ocide":16207,"*/":16208,"Ġflames":16209,"ĠCash":16210,"shit":16211,"Ġcab":16212,"ĠAnaly":16213,"ĠSeah":16214,"Ġdefining":16215,"Ġordering":16216,"Ġimmun":16217,"Ġpersistent":16218,"ACH":16219,"Russian":16220,"mans":16221,"Ġhind":16222,"Ġphotography":16223,"©":16224,"Ġhug":16225,"Ġ107":16226,"ĠHence":16227,"iots":16228,"udeau":16229,"Ġsubsidies":16230,"Ġroutinely":16231,"ĠDevice":16232,"itic":16233,"Ġdisgust":16234,"lander":16235,"Ġ1940":16236,"Ġassignment":16237,"ĠBesides":16238,"wick":16239,"ĠDust":16240,"usc":16241,"structed":16242,"111":16243,"develop":16244,"Ġfond":16245,"Ġintersection":16246,"Ġdignity":16247,"Ġcommissioner":16248,"Without":16249,"reach":16250,"Ġcartoon":16251,"Ġscales":16252,"ãĥŃ":16253,"FIG":16254,"Ġsurveys":16255,"ĠIndonesia":16256,"Ġartwork":16257,"Ġunch":16258,"Ġcycling":16259,"unct":16260,"auer":16261,"orate":16262,"ĠObviously":16263,"Ġcharacterized":16264,"feld":16265,"Ġaffirm":16266,"Ġinnings":16267,"Ġé":16268,"Ġaliens":16269,"Ġcloth":16270,"etooth":16271,"ĠCertain":16272,"§":16273,"Ġdigest":16274,"know":16275,"ĠXL":16276,"Ġpredictions":16277,"Ġdin":16278,"WAR":16279,"Ġaftermath":16280,"Example":16281,"ĠSuccess":16282,"ĠThr":16283,"IGN":16284,"Ġminer":16285,"Bus":16286,"Ġclarity":16287,"heimer":16288,"ĠOUT":16289,"ĠSend":16290,"ĠCircle":16291,"ĠDiet":16292,"Ġpronounced":16293,"Ġcreators":16294,"Ġearthquake":16295,"attery":16296,"geons":16297,"Ġod":16298,"Ġlaying":16299,"orp":16300,"Ult":16301,"project":16302,"Ġundermin":16303,"Ġsequel":16304,"Sam":16305,"ĠDarkness":16306,"Ġreception":16307,"bull":16308,"YS":16309,"ĠVir":16310,"Ġsequences":16311,"ĠCoin":16312,"Ġoutfit":16313,"ĠWait":16314,"119":16315,"Ġdelivers":16316,"......":16317,"Ġblown":16318,"ĠEsc":16319,"ĠMath":16320,"perm":16321,"ĠUl":16322,"Ġglim":16323,"Ġfacial":16324,"Ġgreenhouse":16325,"Ġtokens":16326,"/-":16327,"ĠAnnual":16328,"ĠONE":16329,"Ġteenage":16330,"ĠPhysical":16331,"ĠLang":16332,"ĠCelt":16333,"Ġsued":16334,"ividually":16335,"Ġpatience":16336,"chair":16337,"regular":16338,"Ġaug":16339,"inv":16340,"except":16341,"ĠLil":16342,"Ġnest":16343,"fd":16344,"sum":16345,"ĠCha
se":16346,"Russia":16347,"ĠJennifer":16348,"Ġoffseason":16349,"Overall":16350,"Fore":16351,"Ġriot":16352,"Aud":16353,"former":16354,"Ġdefenders":16355,"ĠCT":16356,"iotic":16357,"ribly":16358,"Ġautomated":16359,"Ġpenis":16360,"Ġinsist":16361,"Ġdiagram":16362,"ĠSQL":16363,"ĠGarc":16364,"Ġwitch":16365,"client":16366,"ierra":16367,"ambers":16368,"Ġrecount":16369,"far":16370,"Very":16371,"osterone":16372,"Ġappreciated":16373,"ĠPerfect":16374,"Section":16375,"Ġdoses":16376,"ocaust":16377,"Ġcostly":16378,"Ġgrams":16379,"ĠShi":16380,"Ġwrestling":16381,"Ġ1971":16382,"Ġtrophy":16383,"Ġnerve":16384,"ĠKaz":16385,"ĠExperience":16386,"Ġpledged":16387,"Ġplayback":16388,"Ġcreativity":16389,"bye":16390,"Ġattackers":16391,"Ġholders":16392,"ĠCoach":16393,"ĠPhD":16394,"Ġtransfers":16395,"Ġcolored":16396,"ĠHindu":16397,"Ġdrown":16398,"Ġlistened":16399,"ĠWA":16400,"iasm":16401,"PO":16402,"Ġappealing":16403,"Ġdisclosed":16404,"ĠChicken":16405,"agging":16406,"Ġpleaded":16407,"Ġnavigation":16408,"ĠReturns":16409,"Ġ[[":16410,"ROR":16411,"EA":16412,"Ġphotographer":16413,"ĠRider":16414,"ippers":16415,"Ġslice":16416,"Ġerect":16417,"Ġhed":16418,"issance":16419,"ĠVikings":16420,"urious":16421,"Ġappet":16422,"oubtedly":16423,"Child":16424,"Ġauthentic":16425,"oos":16426,"ĠMaking":16427,"Ġannouncing":16428,"Ġbod":16429,"Ġmeter":16430,"ĠNine":16431,"ĠRogue":16432,"Ġworkforce":16433,"Ġrenewed":16434,"Ġorganisations":16435,"acs":16436,"PLE":16437,"Short":16438,"Ġcompounds":16439,"ĠVisit":16440,"Ġenvelop":16441,"earth":16442,"Ġsupportive":16443,"ggle":16444,"ĠBrussels":16445,"ĠGuild":16446,"Create":16447,"REL":16448,"Ġaveraged":16449,"Ġ1969":16450,"riages":16451,"Ġlengthy":16452,"Ġforgot":16453,"Okay":16454,"ĠErd":16455,"Ġdealer":16456,"Ġrecession":16457,"DD":16458,"Ġdesperately":16459,"Ġhunger":16460,"Ġsticks":16461,"Ġmph":16462,"ĠFaith":16463,"Ġintentionally":16464,"Ġdemol":16465,"ueller":16466,"ĠSale":16467,"Ġdebris":16468,"spring":16469,"Ġleap":16470,">>>>":16471,"Ġcontainers":16472,"selling":16473,"ranean":16474,"attering":16475,"Ġcommented":16476,"ĠCM":16477,"onut":16478,"Ġwoods":16479,"especially":16480,"Ġorganize":16481,"ivic":16482,"ĠWoods":16483,"anga":16484,"squ":16485,"Ġmaj":16486,"amon":16487,"Ġaxis":16488,"Ġ1974":16489,"ĠDenmark":16490,"Ġwarrior":16491,"ĠPand":16492,"Ġoutlined":16493,"ĠBO":16494,"insula":16495,"zilla":16496,"ebook":16497,"Ġdare":16498,"Ġsearched":16499,"Ġnavigate":16500,"Sn":16501,"writing":16502,"Ġunited":16503,"Japan":16504,"ĠHebrew":16505,"Ġflame":16506,"Ġrelies":16507,"Ġcatching":16508,"ĠSho":16509,"Ġimprisonment":16510,"Ġpockets":16511,"Ġclosure":16512,"ĠFam":16513,"tim":16514,"adequ":16515,"Activity":16516,"Ġrecruiting":16517,"ĠWATCH":16518,"ĠArgentina":16519,"dest":16520,"Ġapologize":16521,"oro":16522,"Ġlacks":16523,"Ġtuned":16524,"ĠGriffin":16525,"Ġinfamous":16526,"Ġcelebrity":16527,"sson":16528,"Ġ----------------------------------------------------------------":16529,"ĠIsis":16530,"ĠDisplay":16531,"Ġcredibility":16532,"Ġeconomies":16533,"Ġheadline":16534,"ĠCowboys":16535,"Ġindef":16536,"Ġlately":16537,"Ġincentives":16538,"button":16539,"ĠMob":16540,"Aut":16541,"Ġresigned":16542,"ĠOm":16543,"camp":16544,"Ġprofiles":16545,"Ġschemes":16546,"olphins":16547,"ayed":16548,"Clinton":16549,"enh":16550,"ĠYahoo":16551,"Ġabst":16552,"Ġank":16553,"suits":16554,"Ġwished":16555,"ĠMarco":16556,"udden":16557,"Ġsphere":16558,"ĠBishop":16559,"Ġincorporated":16560,"ĠPlant":16561,"114":16562,"Ġhated":16563,"pic":16564,"Ġdonate":16565,"Ġlined":16566,"Ġbeans":16567,"Ġstealing":16568,"Ġcostume":16569,"Ġsheriff":
16570,"Ġforty":16571,"Ġintact":16572,"Ġadapted":16573,"Ġtravelling":16574,"bart":16575,"Ġnicely":16576,"Ġdried":16577,"Ġscal":16578,"osity":16579,"NOTE":16580,"ĠBh":16581,"ĠBroncos":16582,"ĠIgn":16583,"Ġintimate":16584,"Ġchemistry":16585,"Ġoptimal":16586,"Deb":16587,"ĠGeneration":16588,"Ġ],":16589,"ichi":16590,"ĠWii":16591,"ĠYOUR":16592,"ventions":16593,"Write":16594,"Ġpopul":16595,"unning":16596,"ĠWor":16597,"Vol":16598,"Ġqueen":16599,"heads":16600,"KK":16601,"Ġanalyze":16602,"opic":16603,"earchers":16604,"Ġdot":16605,"legraph":16606,"astically":16607,"Ġupgrades":16608,"Ġcares":16609,"Ġextending":16610,"Ġfreeze":16611,"Ġinability":16612,"Ġorgans":16613,"Ġpretend":16614,"Ġoutlet":16615,"113":16616,"olan":16617,"ĠMall":16618,"uling":16619,"talk":16620,"Ġexpressing":16621,"ĠAlways":16622,"ĠBegin":16623,"files":16624,"Ġlicenses":16625,"%%":16626,"ĠMitt":16627,"Ġfilters":16628,"ĠMilwaukee":16629,"GN":16630,"Ġunfold":16631,"Mo":16632,"Ġnutrition":16633,"ppo":16634,"Bo":16635,"Ġfounding":16636,"Ġundermine":16637,"Ġeasiest":16638,"ĠCzech":16639,"ĠMack":16640,"Ġsexuality":16641,"ĠNixon":16642,"Win":16643,"ĠArn":16644,"ĠKin":16645,"ãĤ£":16646,"icer":16647,"Ġfortun":16648,"Ġsurfaces":16649,"aghd":16650,"Ġcarriers":16651,"ĠPART":16652,"ĠTib":16653,"Ġinterval":16654,"Ġfrustrating":16655,"ĠShip":16656,"ĠArmed":16657,"ffe":16658,"Ġboats":16659,"ĠAbraham":16660,"inis":16661,"Ġsuited":16662,"thread":16663,"iov":16664,"abul":16665,"ĠVenezuela":16666,"Ġtom":16667,"super":16668,"Ġcastle":16669,"although":16670,"ioxide":16671,"eches":16672,"Ġevolutionary":16673,"Ġnegotiate":16674,"Ġconfronted":16675,"Remember":16676,"Ġ170":16677,"Such":16678,"Ġ911":16679,"mult":16680,"ĠAbyss":16681,"urry":16682,"kees":16683,"spec":16684,"ĠBarbara":16685,"Ġbelonging":16686,"Ġvillain":16687,"istani":16688,"Ġaccountable":16689,"Ġportions":16690,"ĠDecl":16691,"Ur":16692,"ĠKate":16693,"gre":16694,"Ġmagazines":16695,"UCK":16696,"Ġregulate":16697,"omon":16698,"ĠAlmost":16699,"Ġoverview":16700,"Ġscram":16701,"Ġloot":16702,"ĠFitz":16703,"Ġcharacteristic":16704,"ĠSnake":16705,"say":16706,"ĠRico":16707,"Ġtrait":16708,"ĠJoined":16709,"aucus":16710,"Ġadaptation":16711,"ĠAirlines":16712,"Ġarchae":16713,"ĠIde":16714,"Ġbikes":16715,"Ġliterary":16716,"Ġinfluences":16717,"ĠUsed":16718,"Creat":16719,"Ġplea":16720,"ĠDefence":16721,"ĠAssass":16722,"Ġpond":16723,"ULT":16724,")\"":16725,"Ġevaluated":16726,"Ġobtaining":16727,"Ġdemographic":16728,"Ġvigil":16729,"aley":16730,"Ġspouse":16731,"ĠSeahawks":16732,"respons":16733,"ĠBelt":16734,"umatic":16735,"Ġrises":16736,"runner":16737,"ĠMichelle":16738,"Ġpotent":16739,"race":16740,"ĠPAC":16741,"Find":16742,"olesterol":16743,"ISS":16744,"ĠIntroduced":16745,"resses":16746,"ignment":16747,"Os":16748,"ĠTu":16749,"ĠDex":16750,"icides":16751,"Ġsparked":16752,"ĠLaura":16753,"ĠBryant":16754,"Ġsmiling":16755,"ĠNexus":16756,"Ġdefendants":16757,"ĠCatal":16758,"Ġdishes":16759,"shaped":16760,"Ġprolong":16761,"mt":16762,"($":16763,"ãĢĤ":16764,"Ġcalculations":16765,"ĠSame":16766,"Ġpiv":16767,"HH":16768,"Ġcancelled":16769,"Ġgrin":16770,"Ġterritories":16771,"istically":16772,"Come":16773,"ĠParent":16774,"Project":16775,"Ġneglig":16776,"ĠPrivacy":16777,"Ġammo":16778,"LECT":16779,"olutely":16780,"ĠEpic":16781,"Ġmisunder":16782,"wal":16783,"April":16784,"mos":16785,"pathy":16786,"ĠCarson":16787,"Ġalbums":16788,"ĠEasy":16789,"Ġpistol":16790,"<<":16791,"Ġ\\(":16792,"target":16793,"help":16794,"Ġinterpre":16795,"conscious":16796,"ĠHousing":16797,"ĠJoint":16798,"127":16799,"Ġbeers":16800,"science":16801,"ĠFirefox":16802,"effecti
ve":16803,"ĠCabin":16804,"ĠOkay":16805,"ĠApplic":16806,"Ġspacecraft":16807,"ĠSR":16808,"vet":16809,"ĠStrange":16810,"SB":16811,"Ġcorps":16812,"iberal":16813,"efficient":16814,"Ġprevalence":16815,"Ġeconomists":16816,"118":16817,"Thread":16818,"ordable":16819,"ODE":16820,"ĠCant":16821,"=-=-":16822,"ifiable":16823,"ĠAround":16824,"Ġpole":16825,"Ġwillingness":16826,"CLA":16827,"ĠKid":16828,"Ġcomplement":16829,"Ġscattered":16830,"Ġinmates":16831,"Ġbleeding":16832,"every":16833,"Ġqueue":16834,"ĠTrain":16835,"Ġhij":16836,"Ġmelee":16837,"pleted":16838,"Ġdigit":16839,"Ġgem":16840,"official":16841,"Ġlifting":16842,"е":16843,"Requ":16844,"itutes":16845,"Ġpackaging":16846,"ĠWorkers":16847,"hran":16848,"ĠLebanon":16849,"olesc":16850,"Ġpunished":16851,"ĠJuan":16852,"Ġjam":16853,"ĠDocument":16854,"Ġmapping":16855,"icates":16856,"Ġinevitably":16857,"Ġvanilla":16858,"ĠTon":16859,"Ġwatches":16860,"Ġleagues":16861,"Ġinitiated":16862,"degree":16863,"portion":16864,"Ġrecalls":16865,"Ġruin":16866,"Ġmelt":16867,"IAN":16868,"Ġhem":16869,"Exp":16870,"Ġbaking":16871,"ĠColomb":16872,"atible":16873,"Ġradius":16874,"plug":16875,"ĠIF":16876,"etically":16877,"Ġfict":16878,"HER":16879,"ĠTap":16880,"atinum":16881,"Ġink":16882,"Ġcoh":16883,"ĠWizard":16884,"both":16885,"tex":16886,"Ġspends":16887,"ĠCurrently":16888,"ĠPit":16889,"Ġneurons":16890,"ignt":16891,"Ġrall":16892,"Ġbuses":16893,"building":16894,"Ġadjustments":16895,"Ġcried":16896,"iblical":16897,"atted":16898,"ĠZion":16899,"ĠMatter":16900,"Ġmeditation":16901,"ĠDennis":16902,"Ġours":16903,"ĠTab":16904,"Ġrankings":16905,"ortal":16906,"Ġadvers":16907,"Ġsurrender":16908,"ĠGob":16909,"cium":16910,"omas":16911,"imeter":16912,"Ġmultiplayer":16913,"Ġheroin":16914,"Ġoptimistic":16915,"Ġindicator":16916,"ĠBrig":16917,"Ġgrocery":16918,"Ġapplicant":16919,"ĠRocket":16920,"vid":16921,"Exception":16922,"pent":16923,"Ġorganizing":16924,"Ġencounters":16925,"ĠTOD":16926,"Ġjewel":16927,"Save":16928,"ĠChristie":16929,"Ġheating":16930,"Ġlazy":16931,"ĠCP":16932,"Ġcousin":16933,"Config":16934,"Ġregener":16935,"Ġnearest":16936,"Ġachieving":16937,"ENS":16938,"throw":16939,"ĠRichmond":16940,"antle":16941,"2002":16942,"Ġanten":16943,"bird":16944,"133":16945,"Ġnarc":16946,"raint":16947,"unny":16948,"ĠHispanic":16949,"ournaments":16950,"Ġprophe":16951,"ĠThailand":16952,"ĠTi":16953,"Ġinjection":16954,"Ġinherit":16955,"ravis":16956,"Ġmedi":16957,"Ġwhoever":16958,"ĠDEBUG":16959,"GP":16960,"ĠHud":16961,"Card":16962,"prom":16963,"Ġpor":16964,"Ġoverhead":16965,"Law":16966,"Ġviolate":16967,"Ġheated":16968,"Ġdescriptions":16969,"Ġachievements":16970,"ĠBeer":16971,"ĠQuant":16972,"Was":16973,"Ġeighth":16974,"ĠIv":16975,"Ġspecialized":16976,"UPDATE":16977,"ĠDelta":16978,"Pop":16979,"Jul":16980,"ĠAsk":16981,"ophy":16982,"Ġnewsletters":16983,"ĠTool":16984,"Ġgard":16985,"ĠConfeder":16986,"ĠGMT":16987,"ĠAbbott":16988,"Ġimmunity":16989,"ĠVM":16990,"Islam":16991,"Ġimplicit":16992,"wd":16993,"Ġ1944":16994,"ravity":16995,"ometric":16996,"Ġsurviving":16997,"urai":16998,"ĠPrison":16999,"Ġrust":17000,"ĠSketch":17001,"Ġbees":17002,"ĠTheory":17003,"Ġmerit":17004,"Tex":17005,"chat":17006,"Ġmim":17007,"Ġpaste":17008,"ĠKoch":17009,"Ġignorance":17010,"ĠShoot":17011,"Ġbasement":17012,"United":17013,"ĠAdvis":17014,"height":17015,"Ġfoster":17016,"Ġdetain":17017,"information":17018,"Ġneural":17019,"';":17020,"Ġproves":17021,"allery":17022,"Ġinvitation":17023,"umbers":17024,"Ġcattle":17025,"Ġbicycle":17026,"zi":17027,"Ġconsultant":17028,"Ġapology":17029,"ĠTiger":17030,"Ġ123":17031,"999":17032,"Ġindividually":17033,"rt":17034,"i
gion":17035,"ĠBrazilian":17036,"Ġdisturb":17037,"Ġentrepreneurs":17038,"Ġforests":17039,"cerpt":17040,"plates":17041,"pher":17042,"clipse":17043,"Ġtwitter":17044,"Ġacids":17045,"ographical":17046,"hum":17047,"ĠBald":17048,"ifully":17049,"Ġcompiler":17050,"ĠDA":17051,"Ġdonor":17052,"asi":17053,"Ġtribal":17054,"lash":17055,"ĠConfig":17056,"Ġapplicants":17057,"Ġsalaries":17058,"135":17059,"Putin":17060,"ĠFocus":17061,"irs":17062,"Ġmisconduct":17063,"ĠHaz":17064,"Ġeaten":17065,"Mobile":17066,"Muslim":17067,"ĠMarcus":17068,"viol":17069,"Ġfavorable":17070,"Ġstub":17071,"adin":17072,"ĠHob":17073,"Ġfaithful":17074,"Ġelectronics":17075,"Ġvacuum":17076,"wait":17077,"backed":17078,"economic":17079,"dist":17080,"Ġtenure":17081,"Ġsincere":17082,"ĠTogether":17083,"ĠWave":17084,"Ġprogression":17085,"Ġdenying":17086,"Ġdistress":17087,"braska":17088,"third":17089,"Ġmixing":17090,"Ġcolonial":17091,"Ġprivately":17092,"Ġunrest":17093,"aternity":17094,"Ġpremises":17095,"anti":17096,"gregation":17097,"Ġlicence":17098,"ĠHind":17099,"ĠSamuel":17100,"Ġconvincing":17101,"ĠAce":17102,"ĠRust":17103,"ĠNetanyahu":17104,"Ġhandles":17105,"ĠPatch":17106,"oriented":17107,"aho":17108,"ĠGonz":17109,"Ġhackers":17110,"claimer":17111,"Ġcustoms":17112,"ĠGran":17113,"fighters":17114,"Ġluc":17115,"Ġmanuscript":17116,"arenthood":17117,"Ġdevil":17118,"Ġwarriors":17119,"Ġoffenders":17120,"William":17121,"Ġholidays":17122,"Ġnightmare":17123,"Ġlever":17124,"ifferent":17125,"Stat":17126,"Ġexhibition":17127,"puted":17128,"ĠPure":17129,"Ġalpha":17130,"Ġenthusiasm":17131,"ĠRepresentatives":17132,"EAR":17133,"ĠTyp":17134,"Ġwheat":17135,"ĠAlf":17136,"Ġcorrection":17137,"Ġevangel":17138,"ATT":17139,"Miss":17140,"Ġsoup":17141,"Ġimplied":17142,"param":17143,"Ġsexy":17144,"ĠLux":17145,"Ġrepublic":17146,"patch":17147,"ablish":17148,"Ġicons":17149,"Ġfathers":17150,"ĠGET":17151,"ĠCarib":17152,"Ġregulated":17153,"ĠCohen":17154,"ĠBobby":17155,"Ġner":17156,"Ġbent":17157,"ventory":17158,"ĠAlong":17159,"ĠEST":17160,"ĠWallace":17161,"Ġmurders":17162,"rise":17163,"kell":17164,"ĠCommonwealth":17165,"Ġnasty":17166,"eta":17167,"ĠMIT":17168,"Ġadministered":17169,"Ġgenuinely":17170,"Editor":17171,"nick":17172,"Ġhydro":17173,"********************************":17174,"ĠBle":17175,"Ġfines":17176,"Ġgorge":17177,"ausible":17178,"rh":17179,"Ġapple":17180,"mentioned":17181,"Ġrope":17182,"otyp":17183,"HR":17184,"Ġdisappointing":17185,"Ġcage":17186,"nik":17187,"Ġdoubts":17188,"ĠFREE":17189,"prints":17190,"ĠMUST":17191,"Ġvendors":17192,"ĠInqu":17193,"Ġliberals":17194,"Ġcontractor":17195,"Ġupside":17196,"children":17197,"Ġtricky":17198,"Ġregulators":17199,"charged":17200,"liter":17201,"Ġ***":17202,"Ġrebell":17203,"lang":17204,"Ġlocals":17205,"Ġphysicians":17206,"Ġhey":17207,"arse":17208,"tm":17209,"ĠLex":17210,"Ġbehavioral":17211,"successful":17212,"FX":17213,"Ġbrick":17214,"ovic":17215,"Ġconform":17216,"Ġreviewing":17217,"Ġinsights":17218,"Ġbiology":17219,"ĠRemove":17220,"ĠExtra":17221,"Ġcommitting":17222,"induced":17223,"ignty":17224,"igm":17225,"Ġatomic":17226,"Common":17227,"ĠEM":17228,"ĠPere":17229,"ĠItems":17230,"eh":17231,"Ġpreserved":17232,"ĠHood":17233,"Ġprisoner":17234,"Ġbankruptcy":17235,"Ġgren":17236,"ushes":17237,"Ġexploitation":17238,"Ġsignatures":17239,"Ġfinan":17240,"],\"":17241,"ĠMR":17242,"Ġmeg":17243,"remlin":17244,"Ġmusicians":17245,"Ġselecting":17246,"Ġexamining":17247,"INK":17248,"lated":17249,"Hi":17250,"Ġartic":17251,"Ġpets":17252,"Ġimpair":17253,"ĠMAN":17254,"Ġtablets":17255,"include":17256,"Range":17257,"Ġcaut":17258,"Ġlogs":17259,"Ġmounting":1726
0,"Ġunaware":17261,"Ġdynamics":17262,"ĠPalestine":17263,"ĠQuarter":17264,"ĠPurple":17265,"Ġma":17266,"ĠImport":17267,"Ġcollections":17268,"ciation":17269,"Ġsuccessor":17270,"Ġclone":17271,"Ġaiming":17272,"Ġpossessed":17273,"Ġsticking":17274,"Ġshaking":17275,"Ġlocate":17276,"ĠHockey":17277,"Turn":17278,"170":17279,"Ġfifteen":17280,"ĠHarrison":17281,"Ġcontinuously":17282,"ĠTC":17283,"ĠValent":17284,"ĠRescue":17285,"Ġbypass":17286,"amount":17287,"Ġmast":17288,"Ġprotects":17289,"Ġartistic":17290,"Ġsometime":17291,"Ġshoe":17292,"Ġshouted":17293,"ificant":17294,"etitive":17295,"ĠRegister":17296,"ĠJin":17297,"Ġconcentrated":17298,"lington":17299,"onies":17300,"Ġgenerator":17301,"yrim":17302,"ĠArmen":17303,"Ġclearing":17304,"ido":17305,"ĠTW":17306,"alph":17307,"Ġladies":17308,"Hard":17309,"Ġdialog":17310,"Ġinputs":17311,"æľ":17312,"Ġposes":17313,"Ġslots":17314,"ĠPremium":17315,"Ġleaks":17316,"Ġbosses":17317,"Ġ113":17318,"course":17319,"Acc":17320,"ĠNewton":17321,"ĠAustria":17322,"ĠMage":17323,"Ġteaches":17324,"abad":17325,"Ġwears":17326,"Ġcyl":17327,"Ġcurse":17328,"ĠSales":17329,"ĠWings":17330,"Ġpsy":17331,"Ġgaps":17332,"ĠIceland":17333,"ĠPinterest":17334,"Ġlandlord":17335,"Ġdefinitions":17336,"ĠKer":17337,"Ġsufficiently":17338,"ĠPence":17339,"ĠArchitect":17340,"Ġsurpass":17341,"Ġ114":17342,"Ġsuperhero":17343,"ĠDisease":17344,"Ġpriests":17345,"ĠCulture":17346,"Ġdefinitive":17347,"Ġsecretly":17348,"ĠDance":17349,"install":17350,"chief":17351,"ĠJessica":17352,"Would":17353,"Updated":17354,"Ġlocker":17355,"ĠKay":17356,"Ġmemorial":17357,"è¦":17358,"fat":17359,"Ġdisgu":17360,"Ġflavors":17361,"ĠBaseball":17362,"ĠResistance":17363,"Ġkicks":17364,"Ġenv":17365,"Ġteenagers":17366,"Dark":17367,"ĠCAR":17368,"Ġhalt":17369,"ĠLG":17370,"ĠGabriel":17371,"Ġfever":17372,"Ġsatur":17373,"Ġmall":17374,"Ġaffiliate":17375,"ĠSleep":17376,"ĠSpecific":17377,"ĠVel":17378,"Ġjar":17379,"ĠSacred":17380,"ĠEdwards":17381,"ĠACL":17382,"Ġretained":17383,"ĠGiant":17384,"Ġlimitation":17385,"inces":17386,"Ġrefusal":17387,"ĠTale":17388,"ĠButler":17389,"Ġaccidents":17390,"ĠCSS":17391,"Ġimported":17392,"ĠCopy":17393,"α":17394,"ERT":17395,"zel":17396,"Ġdivisions":17397,"hots":17398,"ĠAlb":17399,"ĠDS":17400,"Loader":17401,"Washington":17402,"atisf":17403,"ĠCreative":17404,"\\.":17405,"ĠAutom":17406,"redict":17407,"Ġreceptor":17408,"ĠCarlos":17409,"Method":17410,"oka":17411,"Ġmalicious":17412,"Ġstepping":17413,",[":17414,"ĠDad":17415,"Ġattraction":17416,"ĠEffects":17417,"ĠPirate":17418,"ĠCer":17419,"ĠIndustry":17420,"ĠRud":17421,"Ġcharter":17422,"Ġdining":17423,"Ġinsists":17424,"Ġconfigure":17425,"Ġ(#":17426,"ĠSimple":17427,"ĠScroll":17428,"UTC":17429,"175":17430,"ĠKon":17431,"Ġmarketplace":17432,"ĠãĤ":17433,"Ġrefres":17434,"Ġgates":17435,"erred":17436,"ĠPod":17437,"Ġbehave":17438,"Frank":17439,"node":17440,"Ġendorsed":17441,"hett":17442,"asive":17443,"ĠHomeland":17444,"Ġrides":17445,"ĠLeave":17446,"erness":17447,"Ġflooding":17448,"AFP":17449,"Ġrisen":17450,"Ġcontinually":17451,"Ġunanim":17452,"ĠContract":17453,"ĠPas":17454,"Ġguided":17455,"ĠChile":17456,"bd":17457,"Ġsucc":17458,"ptic":17459,"Ġcommittees":17460,"ĠLuther":17461,"ĠAnyone":17462,"Ġsab":17463,"124":17464,"Ġpixel":17465,"ĠBak":17466,"ĠTag":17467,"ĠBennett":17468,"Enter":17469,"small":17470,"ĠPresidential":17471,"Ġpul":17472,"Ġcontrace":17473,"archive":17474,"Ġcoastal":17475,"ĠKids":17476,"192":17477,"âĢ²":17478,"icky":17479,"INGTON":17480,"Ġwolf":17481,"ĠStalin":17482,"Tur":17483,"idget":17484,"amas":17485,"ĠUnless":17486,"Ġsponsor":17487,"Ġmorph":17488,"ĠChoose":17489,"Ġrunner"
:17490,"Ġunbel":17491,"Ġmud":17492,"ĠMana":17493,"Ġdubbed":17494,"Ġgodd":17495,"urers":17496,"window":17497,"Ġrelied":17498,"Ġcelebrating":17499,"osc":17500,"Ġ135":17501,"Ġlobbying":17502,"Ġincomplete":17503,"Ġrestriction":17504,"Ġincap":17505,"itus":17506,"Ġexpectation":17507,"ĠApollo":17508,"Ġintens":17509,"Ġsync":17510,"GH":17511,"Ġmanipulation":17512,"BY":17513,"Ġspear":17514,"Ġbreasts":17515,"Ġvolcan":17516,"ilia":17517,"Material":17518,"Ġformats":17519,"ĠBast":17520,"Ġparliamentary":17521,"Ġsnake":17522,"Ġservants":17523,"ĠTrudeau":17524,"ĠGrim":17525,"ĠArabic":17526,"ĠSCP":17527,"ĠBoys":17528,"station":17529,"Ġprospective":17530,"orde":17531,"initialized":17532,"Ġbored":17533,"ABLE":17534,"Ġaccessed":17535,"Ġtaxi":17536,"ĠShell":17537,"aiden":17538,"ursed":17539,"inates":17540,"ĠInsurance":17541,"ĠPete":17542,"September":17543,"650":17544,"Ġadventures":17545,"ĠCover":17546,"Ġtribute":17547,"Ġsketch":17548,"Ġempower":17549,"ĠØ":17550,"ĠGlenn":17551,"ĠDaw":17552,"=\\\"":17553,"ĠPolitics":17554,"Ġguides":17555,"Ġdioxide":17556,"ĠGore":17557,"ĠBright":17558,"ĠSierra":17559,"Ġvalued":17560,"cond":17561,"Ġpointer":17562,"Select":17563,"Ġrisky":17564,"Ġabsorb":17565,"images":17566,"Ġrefuses":17567,"Ġbonuses":17568,"___":17569,"Ġhilar":17570,"ĠFeatures":17571,"220":17572,"ĠCollector":17573,"Foot":17574,"Ġ1964":17575,"culus":17576,"Ġdawn":17577,"Ġworkout":17578,"ĠLO":17579,"Ġphilosophical":17580,"ĠSandy":17581,"ĠYouth":17582,"Ġliable":17583,"Af":17584,"blue":17585,"Ġoverturn":17586,"lessness":17587,"ĠTribune":17588,"ĠIng":17589,"Ġfactories":17590,"Ġcatches":17591,"Ġprone":17592,"Ġmatrix":17593,"Ġlogin":17594,"Ġinacc":17595,"Ġexert":17596,"sys":17597,"Ġneedle":17598,"ĠQur":17599,"Ġnotified":17600,"oulder":17601,"tx":17602,"Ġreminds":17603,"Ġpublishers":17604,"Ġnort":17605,"Ġgit":17606,"Ġflies":17607,"ĠEmily":17608,"Ġflowing":17609,"ĠAlien":17610,"ĠStrateg":17611,"Ġhardest":17612,"Ġmodification":17613,"API":17614,"ĠMY":17615,"Ġcrashes":17616,"stairs":17617,"number":17618,"Ġurging":17619,"channel":17620,"ĠFalcon":17621,"Ġinhabitants":17622,"Ġterrifying":17623,"Ġutilize":17624,"Ġbanner":17625,"Ġcigarettes":17626,"Ġsenses":17627,"ĠHolmes":17628,"Ġpractition":17629,"ĠPhillips":17630,"otto":17631,"Ġcompile":17632,"Model":17633,"ĠKo":17634,"Ġ[]":17635,"Americans":17636,"ĠTerms":17637,"Ġmedications":17638,"ĠAna":17639,"Ġfundamentally":17640,"ĠNotice":17641,"Ġweaker":17642,"Ġ0000":17643,"Ġgarlic":17644,"Ġoutbreak":17645,"Ġeconomist":17646,"ĠBirth":17647,"Ġobstacles":17648,"arcer":17649,"ĠOrthodox":17650,"Ġplacebo":17651,"ĠCrew":17652,"aspberry":17653,"ĠAngels":17654,"Ġdischarge":17655,"Ġdestructive":17656,"117":17657,"ĠRising":17658,"Ġdairy":17659,"late":17660,"Ġcollision":17661,"ĠTigers":17662,"eanor":17663,"ocumented":17664,"ĠInvalid":17665,"Ġdont":17666,"ĠLiter":17667,"ĠVa":17668,"Ġhydrogen":17669,"Ġvariants":17670,"ĠBrowns":17671,"Ġ1965":17672,"Ġindigenous":17673,"Ġtrades":17674,"Ġremainder":17675,"Ġswept":17676,"ĠImpact":17677,"Ġredist":17678,"Ġunint":17679,"graduate":17680,"ãĥķ":17681,"ĠWILL":17682,"ãģ®ç":17683,"ĠCritical":17684,"Ġfisher":17685,"Ġvicious":17686,"Ġreversed":17687,"Year":17688,"ĠSox":17689,"Ġshootings":17690,"Ġfilming":17691,"Ġtouchdowns":17692,"aires":17693,"mel":17694,"Ġgrandfather":17695,"Ġaffection":17696,"ingle":17697,"Ġoverly":17698,"Additional":17699,"Ġsupreme":17700,"ĠGrad":17701,"Ġsporting":17702,"Ġmercy":17703,"ĠBrooks":17704,"ounty":17705,"Ġperforms":17706,"Ġtightly":17707,"Ġdemons":17708,"Ġkillings":17709,"Ġfaction":17710,"ĠNova":17711,"auts":17712,"Ġundoubtedly":17713,
"arin":17714,"Ġunderway":17715,"rak":17716,"Ġliv":17717,"ĠRegion":17718,"Ġbriefing":17719,"sers":17720,"cloud":17721,"ĠMik":17722,"usp":17723,"Ġprediction":17724,"azor":17725,"Ġportable":17726,"ĠGand":17727,"Ġpresenting":17728,"Ġ1080":17729,"»":17730,"ushi":17731,"ĠSpark":17732,"thereum":17733,"Ġjustification":17734,"ĠNy":17735,"Ġcontractors":17736,"mingham":17737,"ĠStyle":17738,"åħ":17739,"ĠChronicles":17740,"ĠPicture":17741,"Ġproving":17742,"Ġwives":17743,"sett":17744,"Ġmolecules":17745,"ĠFairy":17746,"Ġconsisting":17747,"Ġpier":17748,"alone":17749,"inition":17750,"Ġnucle":17751,"json":17752,"Ġgotta":17753,"Ġmobil":17754,"Ġverbal":17755,"arium":17756,"Ġmonument":17757,"ucked":17758,"Ġ256":17759,"Tech":17760,"minecraft":17761,"ĠTrack":17762,"Ġtile":17763,"Ġcompatibility":17764,"asis":17765,"Ġsadd":17766,"Ġinstructed":17767,"ĠMueller":17768,"Ġlethal":17769,"Ġhormone":17770,"Ġorche":17771,"else":17772,"Ġskelet":17773,"Ġentertaining":17774,"Ġminimize":17775,"again":17776,"Ġundergo":17777,"Ġconstraints":17778,"Ġcigarette":17779,"ĠIslamist":17780,"Ġtravels":17781,"ĠPanthers":17782,"lings":17783,"Care":17784,"Ġlawsuits":17785,"uras":17786,"Ġcryst":17787,"Ġlowered":17788,"Ġaerial":17789,"Ġcombinations":17790,"Ġhaun":17791,"Ġcha":17792,"Ġvine":17793,"Ġquantities":17794,"Ġlinking":17795,"bank":17796,"Ġsoy":17797,"Bill":17798,"ĠAngela":17799,"Ġrecipient":17800,"ĠProtest":17801,"Ġsocket":17802,"Ġsolidarity":17803,"ĠâĨ":17804,"mill":17805,"Ġvaries":17806,"ĠPakistani":17807,"Dragon":17808,"Ġune":17809,"Ġhorizon":17810,"³³³³³³³³":17811,"Ġprovinces":17812,"Ġfrankly":17813,"Ġenacted":17814,"notes":17815,"['":17816,"Ġ192":17817,"ocracy":17818,"Ġendorsement":17819,"Ġovertime":17820,"True":17821,"Lab":17822,"licted":17823,"ĠDNC":17824,"Ġbeats":17825,"ĠJamie":17826,"152":17827,"ĠINT":17828,"Contact":17829,"Ġaccounted":17830,"hash":17831,"ĠPackers":17832,"pires":17833,"Ġlesbian":17834,"Ġamendments":17835,"Ġhopeful":17836,"ĠFinland":17837,"Ġspotlight":17838,"Ġconfigured":17839,"Ġtroubled":17840,"Ġgaze":17841,"ĠCalgary":17842,"Ġreliability":17843,"Ġinsurg":17844,"swer":17845,"buy":17846,"ĠSkin":17847,"Ġpixels":17848,"Ġhandgun":17849,"Ġparas":17850,"Ġcategor":17851,"ĠEL":17852,"ĠRex":17853,"Indeed":17854,"Ġkinda":17855,"Ġconjunction":17856,"ĠBryan":17857,"ĠManufact":17858,"yang":17859,"Plus":17860,"SQL":17861,"ishment":17862,"Ġdominate":17863,"Ġnail":17864,"Ġoath":17865,"Ġerupt":17866,"ĠFine":17867,"itbart":17868,"ĠChip":17869,"ĠAbd":17870,"ĠNam":17871,"Ġbuyer":17872,"Ġdissent":17873,"Leaks":17874,"Contin":17875,"Ġrider":17876,"ĠSomeone":17877,"Ġillusion":17878,"cin":17879,"ĠBoeing":17880,"Ġinadequ":17881,"ovation":17882,"iants":17883,"Ġrebuild":17884,"450":17885,"ĠDestiny":17886,"SW":17887,"ĠTill":17888,"Hit":17889,"iaz":17890,"ĠBangl":17891,"achers":17892,"ĠReform":17893,"Ġsegments":17894,"Ġsystematic":17895,"dc":17896,"ĠConservatives":17897,"Ġportal":17898,"hor":17899,"ĠDragonbound":17900,"Ġdragged":17901,"omo":17902,"Ġthee":17903,"advert":17904,"ĠReports":17905,"ĠEt":17906,"Ġbarrels":17907,"August":17908,"Ġcomparisons":17909,"Ġhex":17910,"Ġanthrop":17911,"\"[":17912,"borough":17913,"abi":17914,"Ġpictured":17915,"playing":17916,"ĠAddress":17917,"ĠMirror":17918,"Smith":17919,"Ġtires":17920,"ĠNPR":17921,"AAAA":17922,"Ġclassification":17923,"ĠThan":17924,"ĠHarm":17925,"ĠRA":17926,"Ġrejection":17927,"mination":17928,"Ġranged":17929,"ĠFalls":17930,"DI":17931,"Host":17932,"ãĤ´":17933,"ĠExample":17934,"listed":17935,"thirds":17936,"Ġsafegu":17937,"brand":17938,"Ġprobable":17939,"Canada":17940,"ITION":17941,"ĠQaeda":1
7942,"Ġchick":17943,"Ġimports":17944,"hit":17945,"loc":17946,"WW":17947,"Ġblew":17948,"Ġanytime":17949,"Ġwholes":17950,"iked":17951,"Ġcalculation":17952,"create":17953,"ĠOri":17954,"Ġupgraded":17955,"Ġappar":17956,"utory":17957,"ĠMol":17958,"Brit":17959,"ĠJong":17960,"INAL":17961,"ĠStarting":17962,"Ġdice":17963,"urtle":17964,"Ġrelying":17965,"closure":17966,"Ġprofitable":17967,"Ġslaughter":17968,"ĠManual":17969,"caster":17970,"Ġ\"$":17971,"Ġfeather":17972,"ĠSimply":17973,"ieves":17974,"Ġdeterior":17975,"ĠPCI":17976,"Ġstamp":17977,"Ġflaws":17978,"Ġshade":17979,"hammer":17980,"Ġpassport":17981,"Ġconting":17982,"amel":17983,"Ġobservers":17984,"Ġneglect":17985,"ĠRB":17986,"ĠBrotherhood":17987,"Ġskeptical":17988,"family":17989,"usk":17990,"Ġemotionally":17991,"âĻ":17992,"ĠBeta":17993,"asonable":17994,"idity":17995,"ĠMul":17996,"Ġkicking":17997,"ĠCarm":17998,"ollah":17999,"VERTIS":18000,"ĠAthen":18001,"Ġladder":18002,"ĠBullet":18003,"å£":18004,"0001":18005,"ĠWildlife":18006,"ĠMask":18007,"ĠNan":18008,"Rev":18009,"Ġunacceptable":18010,"legal":18011,"Ġcrowded":18012,"agi":18013,"ĠCox":18014,"je":18015,"Ġmorality":18016,"Ġfuels":18017,"Ġcables":18018,"Ġmankind":18019,"ĠCaribbean":18020,"Ġanchor":18021,"Ġbyte":18022,"ĠOften":18023,"ĠOz":18024,"Ġcrafted":18025,"Ġhistorian":18026,"ĠWu":18027,"Ġtowers":18028,"ĠCitizens":18029,"Ġhelm":18030,"Ġcredentials":18031,"Ġsingular":18032,"ĠJesse":18033,"Ġtackles":18034,"Ġcontempt":18035,"Ġafore":18036,"ĠShadows":18037,"Ġnil":18038,"Ġurgent":18039,"apple":18040,"blood":18041,"Ġvon":18042,"Ġoffline":18043,"Ġbreathe":18044,"Ġjumps":18045,"Ġirrelevant":18046,"oxic":18047,"omal":18048,"important":18049,"Jim":18050,"Ġgloves":18051,"arming":18052,"depth":18053,"Ġtalents":18054,"ookie":18055,"ĠSB":18056,"Ġpalm":18057,"uffs":18058,"esta":18059,"IGH":18060,"Ġcanon":18061,"ĠVerizon":18062,"ĠPle":18063,"Ġcoupled":18064,"velt":18065,"Ġfundraising":18066,"ĠGetting":18067,"ĠDLC":18068,"Ġmathematical":18069,"ĠHS":18070,"ĠCardinals":18071,"telling":18072,"Ġsponsors":18073,"ĠÏ":18074,"ĠBulls":18075,"option":18076,"Ġpropose":18077,"Ġmemorable":18078,"Ġembraced":18079,"Ġdeclining":18080,"Health":18081,"eda":18082,"Ġ};":18083,"Ġspam":18084,"mile":18085,"Ġpitcher":18086,"ĠEight":18087,"Ġcaring":18088,"utic":18089,"role":18090,"Ġairline":18091,"ernandez":18092,"ĠAthlet":18093,"Ġcertification":18094,"uxe":18095,"riger":18096,"Ġempir":18097,"Ġsensation":18098,"Ġdism":18099,"Ġbolt":18100,"Ġevolve":18101,"House":18102,"Ġconsultation":18103,"ĠDuty":18104,"Ġtouches":18105,"ĠNathan":18106,"Ġfaint":18107,"had":18108,"\"(":18109,"ĠConsumer":18110,"ĠExtreme":18111,"Ġ127":18112,"ĠHerm":18113,"ĠSacrament":18114,"izoph":18115,"Ġanxious":18116,"ulously":18117,"Ġsocially":18118,"ĠUTC":18119,"Ġsolving":18120,"ĠLetter":18121,"History":18122,"educ":18123,"Price":18124,"));":18125,"Ġreload":18126,"amic":18127,"Ġpork":18128,"Ġdiscourse":18129,"Ġtournaments":18130,"airo":18131,"ĠKur":18132,"ĠCosta":18133,"Ġviolating":18134,"Ġinterfere":18135,"Ġrecreational":18136,"uffle":18137,"Ġspeeches":18138,"Ġneeding":18139,"Ġremembers":18140,"Ġcredited":18141,"nia":18142,"focused":18143,"amera":18144,"Ġbru":18145,"umbs":18146,"ĠCuban":18147,"Ġpreceding":18148,"Ġnonsense":18149,"acial":18150,"Ġsmartphones":18151,"ĠStories":18152,"Sports":18153,"ĠEmergency":18154,"ouncing":18155,"efined":18156,"Ġber":18157,"Ġconsulting":18158,"Ġmasters":18159,"heastern":18160,".\"[":18161,"ĠRunning":18162,"Ġsuscept":18163,"ĠFeng":18164,"America":18165,"prises":18166,"stitial":18167,"ĠWeekly":18168,"ĠGreater":18169,"modules":18170,"ifter
":18171,"Graphics":18172,"uler":18173,"Ġwholly":18174,"Ġsuppress":18175,"Ġconcealed":18176,"Ġhappily":18177,"Ġaccepts":18178,"ĠEnjoy":18179,"Ġrivers":18180,"ĠExcept":18181,"225":18182,"ĠNHS":18183,"ĠMcConnell":18184,"Ġpussy":18185,"ferred":18186,"utable":18187,"Ġattain":18188,"Ġ>=":18189,"Ġdeposits":18190,"rophic":18191,"Ġnotorious":18192,"ĠShaw":18193,"ilitation":18194,"Ġepidemic":18195,"allic":18196,"Ġsmallest":18197,"ovich":18198,"Ġaccessories":18199,"perties":18200,"Ġsurplus":18201,"ĠMech":18202,"Ġambig":18203,"ĠImmigration":18204,"Ġchim":18205,"eval":18206,"Ġpracticing":18207,"ĠMystery":18208,"Ġdomains":18209,"ĠSilicon":18210,"apps":18211,"Ġkilometers":18212,"ea":18213,"ĠSmash":18214,"Ġwarranty":18215,"Ġnost":18216,"sil":18217,"rev":18218,"Jon":18219,"ĠDublin":18220,"Ġtastes":18221,"Ġbout":18222,"great":18223,"error":18224,"Ġswitches":18225,"ĠBapt":18226,"DO":18227,"oki":18228,"Ġsourced":18229,"produ":18230,"Ġattachment":18231,"ĠIssue":18232,"ĠQuestion":18233,"Join":18234,"Ġfitted":18235,"Ġunlawful":18236,"^^":18237,"erek":18238,"Ġauthentication":18239,"Ġstole":18240,"Ġaccountability":18241,"label":18242,"Search":18243,"Ġalbeit":18244,"atican":18245,"funded":18246,"ĠAdding":18247,"ĠIQ":18248,"Ġsubmar":18249,"lit":18250,"aque":18251,"ĠLearning":18252,"Ġinteger":18253,"Master":18254,"ĠChrom":18255,"Ġpremier":18256,"Op":18257,"ĠLiu":18258,"Ġblessed":18259,"ĠGlobe":18260,"ĠResponse":18261,"Ġlegitim":18262,"ĠMerkel":18263,"Ġdisposal":18264,"´":18265,"Ġgauge":18266,"peat":18267,"Ġinduced":18268,"Ġquestionable":18269,"arthy":18270,"ĠVit":18271,"ĠFeed":18272,"Until":18273,"Ut":18274,"worthy":18275,"RY":18276,"ĠHerald":18277,"ĠHammer":18278,"Ġmedal":18279,"ĠRivers":18280,"ĠHack":18281,"Ġclarify":18282,"Ġtracked":18283,"Ġautonomous":18284,"Ġtenant":18285,"ĠQatar":18286,"erie":18287,"Ġgrim":18288,"ĠMonitor":18289,"Ġresistant":18290,"ĠSpec":18291,"ĠWells":18292,"NAS":18293,"148":18294,"Ġminers":18295,"iotics":18296,"Ġmisses":18297,"116":18298,"gian":18299,"git":18300,"ĠEyes":18301,"pres":18302,"Ġgraduated":18303,"Ġangel":18304,"Ġsynchron":18305,"Ġefficiently":18306,"Ġtransmitted":18307,"Harry":18308,"Ġglobally":18309,"ENCE":18310,"ĠMontana":18311,"raged":18312,"ĠPrevention":18313,"Ġpiss":18314,"ĠLl":18315,"Ġshelf":18316,"ĠBJP":18317,"ĠTestament":18318,"ĠLate":18319,"iker":18320,"ĠHapp":18321,"ĠJulian":18322,"hall":18323,"Ġspont":18324,"Ġshutdown":18325,"Ġinconsistent":18326,"Ġsubscribers":18327,"Ġskeleton":18328,"ĠNebraska":18329,"Ġinspire":18330,"ĠVoid":18331,"Feed":18332,"Ġangles":18333,"ĠSprings":18334,"Ġbenchmark":18335,"Ġvaccines":18336,"izophren":18337,"sexual":18338,"uffed":18339,"Ġshine":18340,"ĠKath":18341,"Ġgesture":18342,"inea":18343,"Ġrip":18344,"Ġoppression":18345,"Ġconscience":18346,"bt":18347,"ĠLum":18348,"Ġincidence":18349,"ĠFa":18350,"wr":18351,"Ġmineral":18352,"ĠSpurs":18353,"alky":18354,"Ġthunder":18355,"Ġopio":18356,"Being":18357,"ĠPalm":18358,"Ġwasted":18359,"Ġlb":18360,"iaries":18361,"ĠInitiative":18362,"Ġcurric":18363,"Ġmarker":18364,"ĠMcL":18365,"Ġextensions":18366,"ĠPv":18367,"ĠArms":18368,"Ġofferings":18369,"Ġdefenses":18370,"Ġvendor":18371,"Ġcontradict":18372,"ĠColin":18373,"Ġreddit":18374,"Ġperipher":18375,"122":18376,"Ġsins":18377,"Edit":18378,"ICT":18379,"Soft":18380,"ĠShah":18381,"Ġadministrator":18382,"ĠTrip":18383,"Ġpornography":18384,"Ġtuition":18385,"inence":18386,"ĠProgress":18387,"Ġcatalog":18388,"Ġsuite":18389,"Ġhike":18390,"Ġreproductive":18391,"engine":18392,"Ġdrought":18393,"ĠNoah":18394,"Ġ230":18395,"Ġdude":18396,"Ġrelaxed":18397,"Ġpartition":18398,"Ġparti
cipant":18399,"Ġtelesc":18400,"Ġfeas":18401,"ĠFF":18402,"owner":18403,"Ġsweeping":18404,"Ġlenses":18405,"Ġmatchup":18406,"ĠRepl":18407,"ournals":18408,"Ġcredible":18409,"Ġgrandmother":18410,"Ġthermal":18411,"Ġsubscribing":18412,"Ġidentities":18413,"colm":18414,"UCT":18415,"Ġreluctant":18416,"users":18417,"ĠCort":18418,"Ġassisted":18419,"OSS":18420,"ATIONS":18421,"ISH":18422,"Ġpharmaceutical":18423,"icable":18424,"adian":18425,"ĠSonic":18426,"ĠFury":18427,"ĠMong":18428,"AH":18429,"ĠPsychology":18430,"Ġphosph":18431,"Ġtreats":18432,"ŃĶ":18433,"Ġsteadily":18434,"ĠHello":18435,"Ġrelates":18436,"Ġclue":18437,"Expl":18438,"auth":18439,"Ġrevision":18440,"Ġeld":18441,"osion":18442,"Ġbron":18443,"144":18444,"rikes":18445,"Ġmines":18446,"Ġblanket":18447,"ĠFail":18448,"eled":18449,"ĠImagine":18450,"ĠPlanned":18451,"aic":18452,"Request":18453,"Mad":18454,"ĠHorse":18455,"ĠEagle":18456,"Ġcapac":18457,"157":18458,"Ġling":18459,"ĠNice":18460,"ĠParenthood":18461,"minster":18462,"ogs":18463,"ensitive":18464,"Nothing":18465,"Ġcarn":18466,"Fin":18467,"ĠPE":18468,"Ġrifles":18469,"ĠLP":18470,"Sand":18471,"ĠguiActive":18472,"Ġtourist":18473,"CNN":18474,"Ġunveiled":18475,"Ġpredecessor":18476,"}{":18477,"uber":18478,"Ġoffshore":18479,"Ġoptical":18480,"ĠRot":18481,"ĠPearl":18482,"eton":18483,"Ġstared":18484,"Ġfarther":18485,"atility":18486,"contin":18487,"ĠGy":18488,"ĠFoster":18489,"ĠCoc":18490,"rients":18491,"Ġdesigning":18492,"ĠEconomy":18493,"ONG":18494,"Women":18495,"ĠNancy":18496,"erver":18497,"Ġmascul":18498,"Ġcasualties":18499,"Ġ225":18500,"ĠSullivan":18501,"ĠChoice":18502,"Ġaster":18503,"ws":18504,"Ġhotels":18505,"Ġconsiderations":18506,"Ġcouch":18507,"ĠStrip":18508,"ĠGn":18509,"Ġmanipulate":18510,"lied":18511,"Ġsynthetic":18512,"Ġassaulted":18513,"Ġoffenses":18514,"ĠDrake":18515,"Ġimpe":18516,"October":18517,"ĠHeritage":18518,"hl":18519,"ĠBlair":18520,"Unlike":18521,"Ġgrief":18522,"Ġ450":18523,"Ġopted":18524,"Ġresignation":18525,"ilo":18526,"Ġverse":18527,"ĠTomb":18528,"Ġupt":18529,"Ġaired":18530,"ĠHook":18531,"ĠMLB":18532,"Ġassumes":18533,"outed":18534,"ĠVers":18535,"Ġinferior":18536,"Ġbundle":18537,"ĠDNS":18538,"ographer":18539,"Ġmultip":18540,"ĠSouls":18541,"Ġillustrated":18542,"Ġtactic":18543,"Ġdressing":18544,"Ġduo":18545,"Conf":18546,"Ġrelent":18547,"Ġcant":18548,"Ġscarce":18549,"Ġcandy":18550,"ĠCF":18551,"Ġaffiliated":18552,"Ġsprint":18553,"ylan":18554,"ĠGarcia":18555,"Ġjunk":18556,"Print":18557,"exec":18558,"Crit":18559,"Ġportrait":18560,"iries":18561,"ĠOFF":18562,"Ġdisputes":18563,"WR":18564,"Love":18565,"ãģĦ":18566,"ĠReyn":18567,"Ġhipp":18568,"opath":18569,"Ġfloors":18570,"ĠFeel":18571,"Ġworries":18572,"Ġsettlements":18573,"ĠPos":18574,"Ġmosque":18575,"Ġfinals":18576,"Ġcrushed":18577,"ĠProbably":18578,"ĠBot":18579,"ĠMans":18580,"ĠPeriod":18581,"Ġsovereignty":18582,"Ġseller":18583,"Ġapost":18584,"Ġamateur":18585,"Ġdorm":18586,"Ġconsuming":18587,"Ġarmour":18588,"ĠRoose":18589,"Ġintensive":18590,"Ġeliminating":18591,"ĠSunni":18592,"ĠAleppo":18593,"jin":18594,"Ġadvise":18595,"pal":18596,"ĠHalo":18597,"Ġdescent":18598,"Ġsimpler":18599,"Ġbooth":18600,"STR":18601,"Later":18602,"ĠCave":18603,"===":18604,"Ġmol":18605,"Ġfist":18606,"Ġshotgun":18607,"supp":18608,"Ġrobbery":18609,"Effect":18610,"Ġobscure":18611,"ĠProfessional":18612,"Ġembassy":18613,"Ġmilitant":18614,"Ġincarcer":18615,"Ġgenerates":18616,"Ġlaunches":18617,"Ġadministrators":18618,"Ġshaft":18619,"Ġcircular":18620,"Ġfreshman":18621,"ĠWes":18622,"ĠJoel":18623,"ĠDrew":18624,"ĠDuncan":18625,"ĠApparently":18626,"sight":18627,"ĠInternal":18628,"ĠIndiv
idual":18629,"ĠFE":18630,"Ġbore":18631,"ĠMt":18632,"Ġbroadly":18633,"ĠOptions":18634,"ountain":18635,"ipes":18636,"ĠVideos":18637,"204":18638,"Ġhills":18639,"Ġsimulation":18640,"Ġdisappointment":18641,"itan":18642,"ĠLaboratory":18643,"Ġupward":18644,"Ġboundary":18645,"Ġdarker":18646,"hart":18647,"Ġdominance":18648,"Cong":18649,"ĠOracle":18650,"ĠLords":18651,"Ġscholarship":18652,"ĠVincent":18653,"ede":18654,"ĠRah":18655,"Ġencourages":18656,"rov":18657,"Ġquo":18658,"Ġpremise":18659,"ĠCrisis":18660,"ĠHolocaust":18661,"Ġrhythm":18662,"Ġmetric":18663,"club":18664,"Ġtransported":18665,"Ġnod":18666,"ĠPist":18667,"Ġancestors":18668,"ĠFreder":18669,"thumbnails":18670,"ĠCE":18671,"OND":18672,"Phil":18673,"venge":18674,"ĠProducts":18675,"castle":18676,"Ġqualifying":18677,"ĠKaren":18678,"VERTISEMENT":18679,"Ġmighty":18680,"Ġexplanations":18681,"Ġfixing":18682,"Di":18683,"Ġdeclaring":18684,"Ġanonymity":18685,"Ġjuven":18686,"ĠNord":18687,"ĠDoom":18688,"ĠActually":18689,"Ok":18690,"phis":18691,"ĠDesert":18692,"Ġ116":18693,"IK":18694,"ĠFM":18695,"Ġincomes":18696,"VEL":18697,"okers":18698,"Ġpecul":18699,"Ġlightweight":18700,"gue":18701,"Ġaccent":18702,"Ġincrement":18703,"ĠChan":18704,"Ġcomplaining":18705,"ĠBaghd":18706,"Ġmidfielder":18707,"Ġoverhaul":18708,"Process":18709,"ĠHollow":18710,"ĠTitans":18711,"Small":18712,"manuel":18713,"ĠUnity":18714,"ĠEvents":18715,"Sty":18716,"Ġdisproportion":18717,"nesty":18718,"enes":18719,"ĠCod":18720,"Ġdemonstrations":18721,"ĠCrimson":18722,"ĠOH":18723,"Ġenrolled":18724,"Ġcel":18725,"ĠBrett":18726,"Ġaide":18727,"Ġheels":18728,"Ġbroadband":18729,"Ġmarking":18730,"Ġwizard":18731,"ĠNJ":18732,"ĠChiefs":18733,"Ġingredient":18734,"Ġdug":18735,"ĠShut":18736,"urchase":18737,"endor":18738,"Ġfarmer":18739,"ĠGoldman":18740,"129":18741,"155":18742,"Order":18743,"Ġlion":18744,"iably":18745,"Ġstain":18746,"array":18747,"ilitary":18748,"ĠFAQ":18749,"Ġexploded":18750,"ĠMcCarthy":18751,"ĠTweet":18752,"ĠGreens":18753,"eking":18754,"ln":18755,"ensen":18756,"Ġmotorcycle":18757,"Ġparticle":18758,"Ġcholesterol":18759,"Bron":18760,"Ġstair":18761,"Ġoxid":18762,"Ġdesirable":18763,"ibles":18764,"Ġtheor":18765,"forcing":18766,"Ġpromotional":18767,"ovo":18768,"boot":18769,"ĠBonus":18770,"rawling":18771,"Ġshortage":18772,"ĠPsy":18773,"Ġrecruited":18774,"Ġinfants":18775,"Ġtestosterone":18776,"Ġdeduct":18777,"Ġdistinctive":18778,"Ġfirmware":18779,"built":18780,"145":18781,"Ġexplored":18782,"Ġfactions":18783,"Ġvide":18784,"Ġtattoo":18785,"Ġfinancially":18786,"Ġfatigue":18787,"Ġproceeding":18788,"constitutional":18789,"Ġmiser":18790,"Ġchairs":18791,"gging":18792,"ipple":18793,"Ġdent":18794,"Ġdisreg":18795,"çĶ":18796,"stant":18797,"llo":18798,"bps":18799,"akening":18800,"Ġabnormal":18801,"ĠERA":18802,"士":18803,"ĠHBO":18804,"ĠMAR":18805,"Ġconcess":18806,"Ġservant":18807,"Ġaspir":18808,"lav":18809,"ĠPanel":18810,"amo":18811,"Ġprecip":18812,"Ġrecordings":18813,"Ġproceeded":18814,"Ġcolony":18815,"ĠTang":18816,"ablo":18817,"Ġstripped":18818,"Left":18819,"too":18820,"Ġpotatoes":18821,"Ġfinest":18822,"%).":18823,"Ġcrap":18824,"ĠZach":18825,"abases":18826,"ĠGoth":18827,"Ġbillionaire":18828,"wolf":18829,"Ġsanction":18830,"SK":18831,"Ġlogged":18832,"Po":18833,"eyed":18834,"unal":18835,"Ġcricket":18836,"Ġarmies":18837,"Ġuncovered":18838,"Cloud":18839,"ón":18840,"Ġrebounds":18841,"Ġmes":18842,"Oper":18843,"Pac":18844,"Ġnationally":18845,"Ġinserted":18846,"pict":18847,"Ġgovernance":18848,"и":18849,"Ġprivileges":18850,"GET":18851,"Ġfavorites":18852,"imity":18853,"Ġlover":18854,"them":18855,"empl":18856,"Ġgorgeous":1885
7,"Ann":18858,"Ġslipped":18859,"Ġveto":18860,"Bob":18861,"Ġslim":18862,"ucc":18863,"ĠFame":18864,"uddenly":18865,"Ġdenies":18866,"ĠMaur":18867,"Ġdistances":18868,"Ġwanna":18869,"tar":18870,"ĠSER":18871,"ĠâĪ":18872,"Ġlemon":18873,"athetic":18874,"Ġliteral":18875,"Ġdistinguished":18876,"Ġanswering":18877,"GI":18878,"Ġreligions":18879,"ĠPhilos":18880,"ĠLay":18881,"Ġcompos":18882,"irements":18883,"ĠKos":18884,"inez":18885,"rolling":18886,"Ġyoungest":18887,"andise":18888,"ĠBorn":18889,"Ġaltar":18890,"amina":18891,"ĠBoot":18892,"voc":18893,"Ġdigging":18894,"Ġpressures":18895,"Ġlen":18896,"264":18897,"Ġassassination":18898,"ĠBirmingham":18899,"ĠMyth":18900,"Ġsovereign":18901,"ĠArtist":18902,"ĠPhotograph":18903,"Ġdepicted":18904,"Ġdispens":18905,"orthy":18906,"Ġambul":18907,"integ":18908,"ĠCele":18909,"ĠTibet":18910,"Ġhierarchy":18911,"Ġcu":18912,"Ġpreseason":18913,"ĠPeterson":18914,"Ġcolours":18915,"Ġworrying":18916,"Ġbackers":18917,"ĠPalmer":18918,"Ġμ":18919,"Ġcontributor":18920,"Ġhearings":18921,"Ġurine":18922,"ĠÙ":18923,"ourgeois":18924,"Similar":18925,"ĠZimmer":18926,"something":18927,"ĠUSC":18928,"Ġstrengths":18929,"ĠFI":18930,"Ġlogging":18931,"Asked":18932,"ĠThai":18933,"inqu":18934,"ĠWalt":18935,"Ġcrews":18936,"itism":18937,"301":18938,"Ġsharply":18939,"umed":18940,"Ġredirect":18941,"rators":18942,"Inf":18943,"ĠWeapons":18944,"Ġteasp":18945,"1999":18946,"Live":18947,"ĠEspecially":18948,"ĠSter":18949,"ĠVeterans":18950,"Ġintro":18951,"otherapy":18952,"Ġmalware":18953,"Ġbreeding":18954,"Ġmolecular":18955,"ĠRoute":18956,"ĠComment":18957,"ochem":18958,"Ġain":18959,"Season":18960,"Ġlinebacker":18961,"Ä«":18962,"ĠEconomics":18963,"esar":18964,"ĠLives":18965,"ĠEmma":18966,"Ġkin":18967,"ĠTerrit":18968,"Ġplanted":18969,"oton":18970,"ĠButter":18971,"ĠSpons":18972,"PER":18973,"Ġdungeon":18974,"Ġsymbolic":18975,"Ġfilmed":18976,"Ġdiets":18977,"Ġconcludes":18978,"Ġcertainty":18979,"ĠFormat":18980,"Ġstrangers":18981,"format":18982,"ĠPhase":18983,"Ġcopied":18984,"Ġmetres":18985,"lda":18986,"ĠUsers":18987,"Ġdeliberate":18988,"Ġwashed":18989,"ĠLance":18990,"imation":18991,"Ġimproper":18992,"ĠGenesis":18993,"ickr":18994,"ĠKush":18995,"Ġrealise":18996,"Ġembarrassing":18997,"alking":18998,"bucks":18999,"Ġverified":19000,"Ġoutline":19001,"years":19002,"ĠIncome":19003,"202":19004,"Ġzombies":19005,"Final":19006,"ĠMillenn":19007,"Ġmodifications":19008,"ĠVision":19009,"ĠMoses":19010,"verb":19011,"iterranean":19012,"ĠJet":19013,"Ġnaval":19014,"ĠAgg":19015,"Ġurl":19016,"Ġvictories":19017,"Ġnonetheless":19018,"Ġinjust":19019,"ĠFact":19020,"çļ":19021,"Ġinsufficient":19022,"review":19023,"facebook":19024,"Ġnegotiating":19025,"Ġguarantees":19026,"imen":19027,"utenberg":19028,"Ġgambling":19029,"Ġcongr":19030,"Loading":19031,"Ġnevertheless":19032,"Ġpresidents":19033,"ĠIndustrial":19034,"Ġ118":19035,"Ġpoured":19036,"ĠTory":19037,"Ġ175":19038,"Ġ:=":19039,"Scott":19040,"angered":19041,"Tok":19042,"Ġorganizers":19043,"Mat":19044,"ĠGrowth":19045,"Ġadul":19046,"Ġensures":19047,"Ġ117":19048,"é¾įå":19049,"Ġmassacre":19050,"Ġgrades":19051,"before":19052,"ADVERTISEMENT":19053,"ĠSlow":19054,"ĠMMA":19055,"âĢĶ\"":19056,"ĠVatican":19057,"Qaeda":19058,"Ġowe":19059,"6666":19060,"ĠSorry":19061,"ĠGrass":19062,"Ġbackgrounds":19063,"Ġexhausted":19064,"Ġclan":19065,"Ġcompromised":19066,"ĠElf":19067,"ĠIsaac":19068,"enson":19069,"Invest":19070,"IFA":19071,"Ġinterrupted":19072,"ãĥīãĥ©":19073,"Ġtwisted":19074,"ĠDragons":19075,"Mode":19076,"ĠKremlin":19077,"Ġfertil":19078,"heres":19079,"phan":19080,"ĠNode":19081,"fed":19082,"ĠOrc":19083,"Ġunwilling":
19084,"Cent":19085,"Ġpriorit":19086,"Ġgraduates":19087,"Ġsubjective":19088,"Ġissuing":19089,"ĠLt":19090,"Ġviewer":19091,"Ġwoke":19092,"Thus":19093,"brook":19094,"Ġdepressed":19095,"Ġbracket":19096,"ĠGor":19097,"ĠFighting":19098,"Ġstriker":19099,"Report":19100,"ĠPortugal":19101,"Ġneo":19102,"wed":19103,"199":19104,"Ġfleeing":19105,"shadow":19106,"identified":19107,"USE":19108,"Steam":19109,"Ġstretched":19110,"Ġrevelations":19111,"arted":19112,"ĠDw":19113,"Ġalignment":19114,"eston":19115,"ĠJared":19116,"Sep":19117,"Ġblogs":19118,"update":19119,"gom":19120,"risk":19121,"Ġclash":19122,"ĠHour":19123,"Ġruntime":19124,"Ġunwanted":19125,"Ġscam":19126,"Ġrack":19127,"Ġenlight":19128,"onest":19129,"ĠFerr":19130,"Ġconvictions":19131,"Ġpiano":19132,"Ġcirculation":19133,"ĠWelcome":19134,"Ġbacklash":19135,"ĠWade":19136,"Ġreceivers":19137,"otive":19138,"Jeff":19139,"Ġnetworking":19140,"ĠPrep":19141,"ĠExplorer":19142,"Ġlecture":19143,"Ġuploaded":19144,"ĠMeat":19145,"BLE":19146,"ĠNazis":19147,"ĠSynd":19148,"stud":19149,"roots":19150,"rians":19151,"Ġportrayed":19152,"Ġ??":19153,"ĠBuddha":19154,"sun":19155,"Robert":19156,"ĠComplex":19157,"Ġoversee":19158,"Ġstealth":19159,"Title":19160,"ĠJobs":19161,"ĠKum":19162,"Ġappreciation":19163,"ĠMOD":19164,"Ġbasics":19165,"Ġclips":19166,"Ġnursing":19167,"Ġproposition":19168,"Ġrealised":19169,"ĠNYC":19170,"Ġallocated":19171,"rium":19172,"aran":19173,"ĠProduction":19174,"ĠVote":19175,"Ġsmugg":19176,"Ġhunter":19177,"azer":19178,"ĠChanges":19179,"Ġfluct":19180,"yon":19181,"Array":19182,"Ġkits":19183,"Water":19184,"Ġuncommon":19185,"Ġresting":19186,"ells":19187,"would":19188,"Ġpursued":19189,"Ġassertion":19190,"ometown":19191,"ĠMosul":19192,"ĠPlatform":19193,"iolet":19194,"Ġshareholders":19195,"Ġtrails":19196,"Pay":19197,"ĠEnforcement":19198,"types":19199,"ĠAnonymous":19200,"Ġsatisfying":19201,"ilogy":19202,"Ġ('":19203,"wave":19204,"city":19205,"Steve":19206,"Ġconfrontation":19207,"ĠEld":19208,"Capt":19209,"ahan":19210,"htm":19211,"ĠCtrl":19212,"ONS":19213,"230":19214,"ifa":19215,"holding":19216,"Ġdelicate":19217,"Ġjaw":19218,"ĠGoing":19219,"orum":19220,"Sal":19221,"Ġdull":19222,"ĠBeth":19223,"Ġprisons":19224,"Ġego":19225,"ĠElsa":19226,"avorite":19227,"ĠGang":19228,"ĠNuclear":19229,"Ġspider":19230,"atsu":19231,"Ġsampling":19232,"Ġabsorbed":19233,"ĠPharm":19234,"ieth":19235,"Ġbucket":19236,"ĠRecomm":19237,"OF":19238,"ĠFactory":19239,"ANCE":19240,"Ġbacter":19241,"Has":19242,"ĠObserv":19243,"121":19244,"Ġpremiere":19245,"Develop":19246,"Ġcurrencies":19247,"Cast":19248,"Ġaccompanying":19249,"ĠNashville":19250,"Ġfatty":19251,"ĠBrend":19252,"Ġlocks":19253,"Ġcentered":19254,"ĠUT":19255,"aughs":19256,"orie":19257,"ĠAffordable":19258,"vance":19259,"DL":19260,"emet":19261,"Ġthrone":19262,"ĠBluetooth":19263,"Ġnaming":19264,"ifts":19265,"ADE":19266,"Ġcorrected":19267,"Ġpromptly":19268,"ĠSTR":19269,"Ġgenome":19270,"Ġcope":19271,"Ġvalley":19272,"Ġrounded":19273,"ĠKend":19274,"alion":19275,"pers":19276,"Ġtourism":19277,"Ġstark":19278,"vl":19279,"Ġblowing":19280,"ĠSchedule":19281,"std":19282,"Ġunhappy":19283,"Ġlitigation":19284,"cedes":19285,"Ġandroid":19286,"Ġintegral":19287,"erers":19288,"uded":19289,"tax":19290,"Ġreiter":19291,"ĠMotors":19292,"ociated":19293,"Ġwonders":19294,"ĠApost":19295,"ucking":19296,"ĠRoosevelt":19297,"fram":19298,"Ġyields":19299,"Ġconstitutes":19300,"awk":19301,"Interest":19302,"Ġinterim":19303,"Ġbreakthrough":19304,"ĠCher":19305,"Ġprosec":19306,"ĠDj":19307,"ĠMT":19308,"Resp":19309,"ĠPT":19310,"Ġsperm":19311,"edit":19312,"BT":19313,"Linux":19314,"country":19315,"lea
gue":19316,"Ġdick":19317,"Ġoct":19318,"Ġinserting":19319,"Ġscra":19320,"ĠBrewing":19321,"Ġ1966":19322,"Ġrunners":19323,"Ġplun":19324,"idy":19325,"ĠDian":19326,"Ġdysfunction":19327,"Ġexclusion":19328,"Ġdisgr":19329,"Ġincorporate":19330,"Ġreconc":19331,"Ġnominated":19332,"ĠArcher":19333,"draw":19334,"achelor":19335,"Ġwritings":19336,"Ġshallow":19337,"Ġhast":19338,"ĠBMW":19339,"ĠRS":19340,"Ġthigh":19341,"Ġ1963":19342,"Ġlamb":19343,"Ġfavored":19344,"agle":19345,"Ġcooler":19346,"ĠHours":19347,"ĠGU":19348,"ĠOrigin":19349,"Ġglimpse":19350,"--------------------":19351,"Lim":19352,"Ġcheek":19353,"Ġjealous":19354,"-'":19355,"Ġharness":19356,"ĠPoison":19357,"Ġdisabilities":19358,"neapolis":19359,"Ġoutlook":19360,"Ġnotify":19361,"ĠIndianapolis":19362,"Ġabrupt":19363,"nsic":19364,"Ġencrypted":19365,"Ġforfe":19366,"reath":19367,"Ġrabb":19368,"Ġfoundations":19369,"Ġcompliment":19370,"ĠInterview":19371,"ĠSwe":19372,"Ġadolesc":19373,"Ġmonitors":19374,"ĠSacramento":19375,"Ġtimely":19376,"Ġcontempl":19377,"Ġpositioned":19378,"Ġposters":19379,"phies":19380,"iovascular":19381,"void":19382,"ĠFifth":19383,"Ġinvestigative":19384,"OUN":19385,"Ġintegrate":19386,"ĠINC":19387,"isha":19388,"iblings":19389,"ĠRequest":19390,"ĠRodriguez":19391,"Ġslides":19392,"ĠDX":19393,"Ġfeminism":19394,"Ġdatas":19395,"Ġbend":19396,"irus":19397,"ĠNigeria":19398,"Fox":19399,"Change":19400,"Ġairplane":19401,"ĠLaden":19402,"Ġpublicity":19403,"ixty":19404,"Ġcommitments":19405,"Ġaggregate":19406,"Ġdisplaying":19407,"ĠArrow":19408,"Ġ122":19409,"Ġrespects":19410,"android":19411,"six":19412,"ĠSha":19413,"Ġrestoration":19414,")\\":19415,"WS":19416,"oys":19417,"Ġillustrate":19418,"without":19419,"126":19420,"ĠâĶĤ":19421,"Ġpickup":19422,"nels":19423,"Ġ....":19424,"food":19425,"ĠFen":19426,")?":19427,"Ġphenomena":19428,"Ġcompanions":19429,"ĠWrite":19430,"Ġspill":19431,"Ġbridges":19432,"ĠUpdated":19433,"ĠFo":19434,"Ġinsects":19435,"ASHINGTON":19436,"Ġscare":19437,"iltr":19438,"ĠZhang":19439,"Ġseverity":19440,"Ġindul":19441,"149":19442,"ĠCoffee":19443,"Ġnorms":19444,"Ġpulse":19445,"ĠFT":19446,"Ġhorrific":19447,"ĠDestroy":19448,"ĠJSON":19449,"Ġolive":19450,"Ġdiscusses":19451,"Rest":19452,"Elect":19453,"ĠWinn":19454,"ĠSurviv":19455,"ĠHait":19456,"Sure":19457,"oped":19458,"Ġrooted":19459,"ĠSke":19460,"ĠBronze":19461,"Ġlol":19462,"Default":19463,"Ġcommodity":19464,"redited":19465,"Ġlibertarian":19466,"Ġforbidden":19467,"Ġgran":19468,"à¨":19469,"Ġlag":19470,"enz":19471,"drive":19472,"Ġmathematics":19473,"Ġwires":19474,"Ġcritically":19475,"Ġcarbohyd":19476,"ĠChancellor":19477,"ĠEddie":19478,"Ġbanning":19479,"ĠFri":19480,"Ġcomplications":19481,"etric":19482,"ĠBangladesh":19483,"Ġbandwidth":19484,"Stop":19485,"ĠOriginally":19486,"Ġhalfway":19487,"ynasty":19488,"shine":19489,"Ġtales":19490,"rities":19491,"avier":19492,"Ġspinning":19493,"ĠWHO":19494,"Ġneighbourhood":19495,"bach":19496,"Ġcommerce":19497,"ĠSle":19498,"BU":19499,"Ġentrepreneur":19500,"Ġpeculiar":19501,"ĠComments":19502,"fre":19503,"320":19504,"ICS":19505,"Ġimagery":19506,"ĠCanon":19507,"ĠElectronic":19508,"short":19509,"((":19510,"Dig":19511,"Ġcommem":19512,"uced":19513,"Ġinclined":19514,"ĠSummon":19515,"Ġcliff":19516,"ĠMediterranean":19517,"Ġpoetry":19518,"Ġprosperity":19519,"ĠRece":19520,"Ġpills":19521,"member":19522,"Ġfinale":19523,"unc":19524,"ĠGig":19525,"ä½":19526,"Ġlod":19527,"Ġbackward":19528,"-+":19529,"ĠForward":19530,"Ġthri":19531,"sure":19532,"Ġsoap":19533,"ĠFX":19534,"RES":19535,"ĠSexual":19536,"oulos":19537,"Ġfoolish":19538,"Ġrighteous":19539,"Ġcoff":19540,"terrorism":19541,"ustain"
:19542,"oter":19543,"Ġabuses":19544,"next":19545,"Ġabusive":19546,"Ġthereafter":19547,"Ġprohibition":19548,"ĠSUP":19549,"Ġdip":19550,"Ġripped":19551,"Ġinherited":19552,"Ġbats":19553,"stru":19554,"GT":19555,"Ġflawed":19556,"phabet":19557,"Ġfog":19558,"doors":19559,"Ġimaging":19560,"Ġdigits":19561,"ĠHungary":19562,"Ġarrog":19563,"Ġteachings":19564,"Ġprotocols":19565,"ĠBanks":19566,"à¸":19567,"pound":19568,"ĠCurt":19569,".\")":19570,"./":19571,"Ġexemption":19572,"endix":19573,"ĠMull":19574,"Ġimproves":19575,"ĠGamer":19576,"dimensional":19577,"Icon":19578,"ĠMargaret":19579,"Status":19580,"dates":19581,"Ġintends":19582,"Ġdepict":19583,"Ġparked":19584,"Joe":19585,"ĠMarines":19586,"chnology":19587,"!).":19588,"Ġjudged":19589,"Ġweights":19590,"Ray":19591,"Ġapartments":19592,"hester":19593,"Ġreinforce":19594,"Ġoffender":19595,"occup":19596,"Ġsore":19597,"ept":19598,"ĠPHP":19599,"ĠBrow":19600,"Ġauthorization":19601,"ĠRisk":19602,"ĠDelaware":19603,"ĠQU":19604,"Ġnotifications":19605,"Ġsunlight":19606,"Ġexclude":19607,"dat":19608,"Ġmesh":19609,"ĠSudan":19610,"Ġbelonged":19611,"Ġsubway":19612,"Ġnoon":19613,"ĠInterior":19614,"olics":19615,"ĠLakers":19616,"Ġcoding":19617,"Disclaimer":19618,"Calif":19619,"Old":19620,"Ġdisl":19621,"?????":19622,"Ġconfirms":19623,"Ġrecruitment":19624,"Ġhomicide":19625,"Consider":19626,"ĠJeffrey":19627,"fty":19628,"};":19629,"Ġobjection":19630,"doing":19631,"ĠLeo":19632,"Want":19633,"Ġglow":19634,"ĠClarke":19635,"ĠNorman":19636,"Ġverification":19637,"Ġpacket":19638,"ĠFormula":19639,"Ġplag":19640,"esville":19641,"Ġshouting":19642,"Ġov":19643,"ĠREC":19644,"ĠBub":19645,"Ġninth":19646,"Ġenerg":19647,"Ġvalidity":19648,"Ġups":19649,"jack":19650,"Ġneighboring":19651,"ĠNec":19652,"eworks":19653,"ĠHab":19654,"arez":19655,"Ġspine":19656,"Ġeventual":19657,"ĠLeaders":19658,"ĠCarn":19659,"Ġprobation":19660,"Ġromance":19661,"msg":19662,"ĠMechanical":19663,"ERY":19664,"Rock":19665,"Ġpartisan":19666,"Node":19667,"assets":19668,"minent":19669,"Ġforeigners":19670,"Ġtestify":19671,"ĠUsually":19672,"lords":19673,"ĠGren":19674,"ĠPowell":19675,"BIL":19676,"Ġsr":19677,"Ġaddict":19678,"Ġshells":19679,"Ġsigh":19680,"ĠYale":19681,"ternity":19682,"Ġ750":19683,"EU":19684,"ĠRifle":19685,"Ġpatron":19686,"ema":19687,"ĠBannon":19688,"anity":19689,"Ġtropical":19690,"ĠVII":19691,"cross":19692,"Everything":19693,"ĠISO":19694,"Ġhumble":19695,"assing":19696,"ĠFIG":19697,"Ġupdating":19698,"yson":19699,"Ġcalcium":19700,"Ġcompetent":19701,"Ġsteering":19702,"Prot":19703,"ĠSY":19704,"ĠFinals":19705,"ĠRug":19706,"159":19707,"137":19708,"ĠGolf":19709,"Ġ126":19710,"Ġaccommodation":19711,"ĠHughes":19712,"Ġaesthetic":19713,"artisan":19714,"ĠTwilight":19715,"Ġprince":19716,"ĠAgriculture":19717,"ĠDisco":19718,"Ġprecedent":19719,"Ġtyping":19720,"authorized":19721,"Option":19722,"ĠAub":19723,"lishes":19724,"acht":19725,"mag":19726,"Peter":19727,"ĠUFO":19728,"monton":19729,"ĠLith":19730,"Ġarom":19731,"Ġsecuring":19732,"Ġconfined":19733,"private":19734,"Ġswords":19735,"Ġmarkers":19736,"Ġmetabolic":19737,"select":19738,"ĠCurse":19739,"ĠOt":19740,"gressive":19741,"Ġincumb":19742,"ĠSaga":19743,"Ġpriced":19744,"Ġclearance":19745,"Content":19746,"Ġdrilling":19747,"Ġnotices":19748,"Ġbourgeois":19749,"Ġvest":19750,"Ġcookie":19751,"ĠGuardians":19752,"rys":19753,"inyl":19754,"Ġ124":19755,"Ġplausible":19756,"ongh":19757,"ĠOdin":19758,"Ġconception":19759,"ĠYuk":19760,"ĠBaghdad":19761,"ĠFlag":19762,"Austral":19763,"ĠIBM":19764,"Ġinternationally":19765,"ĠWikiLeaks":19766,"IED":19767,"Ġcyn":19768,"Ġchooses":19769,"ĠPill":19770,"Ġcombining":19
771,"Ġradi":19772,"ĠMohammed":19773,"defense":19774,"atching":19775,"Subject":19776,"iciency":19777,"Frame":19778,"Ġ{\"":19779,"Ġchess":19780,"Ġtimer":19781,"190":19782,"Ġtin":19783,"Ġordinance":19784,"emetery":19785,"Ġaccusing":19786,"Ġnoticeable":19787,"Ġcentres":19788,"Ġlid":19789,"ĠMills":19790,"imgur":19791,"Ġzoom":19792,"ergic":19793,"Ġcompression":19794,"prim":19795,"find":19796,"Ġsurg":19797,"Ġpand":19798,"ĠKee":19799,"ĠChad":19800,"cellence":19801,"oyle":19802,"Ġsocialism":19803,"ĠTravis":19804,"ĠMHz":19805,"Ġguild":19806,"ALLY":19807,"ĠSubscribe":19808,"ĠRelated":19809,"Ġoccurrence":19810,"itching":19811,"Ġfictional":19812,"Ġcrush":19813,"ĠEA":19814,"cod":19815,"mix":19816,"ĠTriple":19817,"Ġretrieve":19818,"Ġstimulus":19819,"Ġpsychiat":19820,"ĠDoor":19821,"Ġhomosexuality":19822,"Ġelementary":19823,"Ġcellular":19824,"idian":19825,"ĠLaun":19826,"Ġintriguing":19827,"Ġfoam":19828,"ĠBass":19829,"idi":19830,"itsu":19831,"Ġassure":19832,"Ġcongrat":19833,"Ġbusinessman":19834,"ĠBoost":19835,"close":19836,"Ġlied":19837,"Ġsciences":19838,"ĠOmega":19839,"ĠGraphics":19840,"Ġ<=":19841,"spoken":19842,"Ġconnectivity":19843,"Saturday":19844,"ĠAvengers":19845,"Ġtoggle":19846,"Ġankle":19847,"Ġnationalist":19848,"model":19849,"ĠPool":19850,"ophobia":19851,"Var":19852,"ĠMons":19853,"atories":19854,"Ġaggressively":19855,"Clear":19856,"Forge":19857,"acters":19858,"Ġhedge":19859,"Ġpipes":19860,"Ġblunt":19861,"Ġsq":19862,"Ġremotely":19863,"Wed":19864,"asers":19865,"Ġrefriger":19866,"Ġtiles":19867,"Ġrescued":19868,"Ġcomprised":19869,"insky":19870,"Ġmanif":19871,"avanaugh":19872,"Ġprolifer":19873,"Ġaligned":19874,"xml":19875,"Ġtriv":19876,"Ġcoordination":19877,"ĠPER":19878,"ĠQuote":19879,"134":19880,"bf":19881,"ĠSaw":19882,"Ġtermination":19883,"Ġ190":19884,"Ġadditions":19885,"Ġtrio":19886,"Ġprojections":19887,"Ġpositively":19888,"Ġinclusive":19889,"Ġmembr":19890,"1990":19891,"older":19892,"Ġpracticed":19893,"inkle":19894,"Arch":19895,"Ġstarters":19896,"arius":19897,"Ġintermediate":19898,"ĠBenef":19899,"ĠKiller":19900,"Ġinterventions":19901,"ĠKil":19902,"ĠFlying":19903,"Inv":19904,"Ġpremature":19905,"Ġpsychiatric":19906,"Ġindie":19907,"Ġcollar":19908,"ĠRainbow":19909,"afi":19910,"Ġdisruption":19911,"ĠFOX":19912,"casting":19913,"Ġmisdem":19914,"cro":19915,"Ġwipe":19916,"ardon":19917,"Ġbast":19918,"ĠTommy":19919,"ĠRepresentative":19920,"Ġbelly":19921,"ĠPO":19922,"ĠBreitbart":19923,"132":19924,"Ġmessaging":19925,"Should":19926,"References":19927,"ĠGRE":19928,"istical":19929,"LP":19930,"ĠCav":19931,"ĠCrazy":19932,"Ġintuitive":19933,"keeping":19934,"ĠMoss":19935,"Ġdiscontin":19936,"ĠModule":19937,"Ġunrelated":19938,"ĠPractice":19939,"ĠTransport":19940,"Ġstatistically":19941,"orns":19942,"Ġsized":19943,"pu":19944,"Ġcaf":19945,"ĠWorlds":19946,"ĠRodgers":19947,"ĠLun":19948,"ĠComic":19949,"living":19950,"Ġcared":19951,"Ġclimbed":19952,"){":19953,"Ġconsisted":19954,"Ġmedieval":19955,"folk":19956,"Ġhacked":19957,"Ġdire":19958,"ĠHermione":19959,"Ġtended":19960,"ceans":19961,"Daniel":19962,"went":19963,"Ġlegislators":19964,"Ġredes":19965,"games":19966,"Ġgn":19967,"amiliar":19968,"Ġ++":19969,"ggy":19970,"threat":19971,"Ġmagnet":19972,"Ġperceive":19973,"Ġzip":19974,"Ġindictment":19975,"Ġcritique":19976,"gard":19977,"ĠSafe":19978,"ĠCream":19979,"Ġadvent":19980,"oba":19981,"Ġvowed":19982,"ousands":19983,"Ġski":19984,"Ġabortions":19985,"uart":19986,"Ġstunned":19987,"Ġadvancing":19988,"Ġlacked":19989,"Ġ\\\"":19990,"Ġschizophren":19991,"Ġelegant":19992,"Ġconferences":19993,"Ġcanceled":19994,"ĠHudson":19995,"ĠHopefully":19996,"Ġ
trump":19997,"Ġfrequencies":19998,"Ġmeteor":19999,"ĠJunior":20000,"ĠFleet":20001,"ĠMalcolm":20002,"ĠTools":20003,"Ġ........":20004,"Ġhobby":20005,"ĠEuropeans":20006,"Ġ1500":20007,"ĠInto":20008,"Ġsway":20009,"ĠAppro":20010,"ĠCompl":20011,"Community":20012,"Ġtide":20013,"ĠSummit":20014,"ä»":20015,"Ġintervals":20016,"ĠEther":20017,"Ġhabitat":20018,"ĠStevens":20019,"lishing":20020,"ĠDomain":20021,"Ġtriggers":20022,"Ġchasing":20023,"Ġcharm":20024,"ĠFlower":20025,"itored":20026,"Ġblessing":20027,"Ġtextures":20028,"Five":20029,"Ġliquor":20030,"RP":20031,"FIN":20032,"Ġ1962":20033,"CAR":20034,"Unknown":20035,"Ġresil":20036,"ĠLily":20037,"Ġabundance":20038,"Ġpredictable":20039,"rar":20040,"Ġbullshit":20041,"leen":20042,"chet":20043,"Mor":20044,"Much":20045,"ä¹":20046,"Ġemphasized":20047,"Ġcrust":20048,"Ġprimitive":20049,"Ġenjoyable":20050,"ĠPictures":20051,"Ġteammate":20052,"pler":20053,"ĠTol":20054,"ĠKane":20055,"Ġsummoned":20056,"thy":20057,"rama":20058,"ĠHonda":20059,"Ġrealizing":20060,"Ġquicker":20061,"Ġconcentrate":20062,"clear":20063,"Ġ210":20064,"ĠErdogan":20065,"aris":20066,"Ġresponds":20067,"ĠBI":20068,"Ġeligibility":20069,"Ġpushes":20070,"ĠIdaho":20071,"Ġaggrav":20072,"Ġruins":20073,"urations":20074,"Ġbans":20075,"Ġanat":20076,"share":20077,"Ġgrind":20078,"hin":20079,"umen":20080,"Ġutilities":20081,"ĠYankees":20082,"Ġdatabases":20083,"ĠDD":20084,"Ġdisplaced":20085,"Ġdependencies":20086,"Ġstimulation":20087,"hun":20088,"houses":20089,"ĠPretty":20090,"ĠRavens":20091,"ĠTODAY":20092,"Ġassociates":20093,"Ġtherape":20094,"cled":20095,"Ġdeer":20096,"Ġrepairs":20097,"rentice":20098,"Ġreceptors":20099,"Ġremed":20100,"ĠCe":20101,"Ġmarriages":20102,"Ġballots":20103,"ĠSoldier":20104,"Ġhilarious":20105,"opl":20106,"138":20107,"Ġinherently":20108,"Ġignorant":20109,"Ġbounce":20110,"ĠEaster":20111,"RELATED":20112,"ĠCurrency":20113,"EV":20114,"ãĥŀ":20115,"ĠLead":20116,"Ġdeceased":20117,"Brien":20118,"ĠMusk":20119,"JS":20120,"Ġmerge":20121,"hearted":20122,"creat":20123,"mitt":20124,"mund":20125,"ĠâĢĭ":20126,"ĠBag":20127,"Ġprojection":20128,"Ġjava":20129,"ĠStandards":20130,"ĠLeonard":20131,"Ġcoconut":20132,"ĠPopulation":20133,"Ġtraject":20134,"Ġimply":20135,"Ġcuriosity":20136,"ĠDB":20137,"ĠFresh":20138,"ĠPor":20139,"Ġheavier":20140,"neys":20141,"gomery":20142,"Ġdeserved":20143,"Ġphrases":20144,"ĠGC":20145,"Ġyeast":20146,"desc":20147,"Death":20148,"Ġreboot":20149,"Ġmetadata":20150,"ICAL":20151,"Ġrepay":20152,"ĠIndependence":20153,"Ġsuburban":20154,"icals":20155,"Ġatop":20156,"Ġallocation":20157,"generation":20158,"ĠGram":20159,"Ġmoisture":20160,"Ġpine":20161,"ĠLiberals":20162,"Ġaides":20163,"Ġunderest":20164,"ĠBerry":20165,"Ġceremon":20166,"370":20167,"astrous":20168,"ĠPirates":20169,"Ġtense":20170,"ĠIndustries":20171,"ĠAppeals":20172,"ĠNear":20173,"Ġè£ıç":20174,"Ġlovers":20175,"ĠCAP":20176,"ĠCraw":20177,"Ġgiants":20178,"Ġefficacy":20179,"Element":20180,"ĠBehavior":20181,"ĠToyota":20182,"Ġintest":20183,"Priv":20184,"AI":20185,"Ġmaneuver":20186,"Ġperfection":20187,"Ġbang":20188,"paper":20189,"rill":20190,"George":20191,"border":20192,"inters":20193,"ĠSeth":20194,"Ġclues":20195,"ĠLevi":20196,"ĠRevenue":20197,"147":20198,"Ġvapor":20199,"Ġfortunate":20200,"Ġthreatens":20201,"Ġvet":20202,"Ġdependency":20203,"ersed":20204,"article":20205,"ĠBlizzard":20206,"Ġchlor":20207,"Ġminus":20208,"ĠBills":20209,"Ġcryptocurrency":20210,"Ġmetabolism":20211,"tering":20212,"Ġpestic":20213,"steps":20214,"ĠTreasure":20215,"racted":20216,"ĠConstant":20217,"Ġtemp":20218,"139":20219,"ĠDetective":20220,"urally":20221,"Ġrecovering":20222,
"Ġcortex":20223,"Ġ144":20224,"closed":20225,"Ġprejudice":20226,"aunted":20227,"Ġstorms":20228,"ĠNOW":20229,"Ġmachinery":20230,"Address":20231,"Ġcompelled":20232,"270":20233,"Ġdespair":20234,"bane":20235,"Ġvegetable":20236,"Ġbeds":20237,"Learn":20238,"Ġcolorful":20239,"Ġspike":20240,"Ġmargins":20241,"Ġsympathy":20242,"Ġworkshop":20243,"ĠCBC":20244,"Sat":20245,"Ġburns":20246,"ĠGender":20247,"Ġ129":20248,"ĠCable":20249,"Ġdebts":20250,"ĠTheresa":20251,"Ġreflecting":20252,"Ġairst":20253,"Ġrim":20254,"ramid":20255,"Ġweaknesses":20256,"Writ":20257,"oggle":20258,"ti":20259,"ĠCharge":20260,"Ġweighed":20261,"Ġ(.":20262,"Ġlaughter":20263,"Ġrouter":20264,"ĠDemocracy":20265,"Dear":20266,"Ġhasht":20267,"Ġdy":20268,"Ġhints":20269,"running":20270,"Ġfinishes":20271,"arus":20272,"Mass":20273,"result":20274,"ascus":20275,"Ġvintage":20276,"Ġconqu":20277,"Ġwildly":20278,"acist":20279,"Ġlingu":20280,"Ġprotagonist":20281,"strom":20282,"teenth":20283,"ĠSolo":20284,"mac":20285,"filled":20286,"Ġrenown":20287,"itives":20288,"Ġmotive":20289,"ĠAntar":20290,"ĠMann":20291,"ĠAdjust":20292,"Ġrockets":20293,"Ġtroubling":20294,"ei":20295,"Ġorganisms":20296,"assis":20297,"Christian":20298,"Ġ145":20299,"ĠHass":20300,"Ġswall":20301,"Ġwax":20302,"ĠSurvival":20303,"VS":20304,"ĠMurd":20305,"vd":20306,"standard":20307,"Ġdragons":20308,"Ġacceleration":20309,"rational":20310,"final":20311,"Ġpaired":20312,"ĠEthereum":20313,"Ġinterfaces":20314,"Ġresent":20315,"Ġartifacts":20316,"Å«":20317,"arel":20318,"Ġcompetitor":20319,"ĠNicholas":20320,"ĠSurface":20321,"cpp":20322,"ĠTot":20323,"Ġeconomically":20324,"Ġorganised":20325,"Ġenforced":20326,"inho":20327,"Ġvarieties":20328,"Ġabdom":20329,"ĠBailey":20330,"idav":20331,"ĠSalv":20332,"paid":20333,"Ġaltitude":20334,"essert":20335,"ĠGutenberg":20336,"area":20337,"opoulos":20338,"Ġprofessors":20339,"iggs":20340,"ĠFate":20341,"hey":20342,"Ġ3000":20343,"Dist":20344,"Ġtwins":20345,"cill":20346,"ĠMaps":20347,"Ġtraps":20348,"Ġweed":20349,"ĠKiss":20350,"Ġyoga":20351,"Ġrecipients":20352,"ĠWestminster":20353,"Ġpools":20354,"ĠWalmart":20355,"188":20356,"ĠSchools":20357,"attack":20358,"ĠARM":20359,"paragraph":20360,"Warning":20361,"jl":20362,"Ġselfish":20363,"anchez":20364,"ĠHeights":20365,"Fre":20366,"ĠSoph":20367,"Ġ--------------------------------":20368,"tml":20369,"333":20370,"Ġraids":20371,"Ġsatellites":20372,"KEY":20373,"Ġlasts":20374,"ÑĤ":20375,"Ins":20376,"ĠDame":20377,"Ġunpredict":20378,"///":20379,"ghai":20380,"Ġartillery":20381,"Ġcruise":20382,"Ġgel":20383,"ĠCabinet":20384,"Ġblows":20385,"ĠEsp":20386,"Ġproximity":20387,"othe":20388,"ĠSkills":20389,"ĠUpper":20390,"obo":20391,"ĠNDP":20392,"Ġenjoys":20393,"Ġrepeating":20394,"ĠConstruction":20395,"ĠQuestions":20396,"Hillary":20397,"Ġuint":20398,"Ġprocessors":20399,"ĠGibson":20400,"ĠMultiple":20401,"qa":20402,"ĠBom":20403,"ĠMiles":20404,"ventional":20405,"Ġhurts":20406,"skin":20407,"ĠAIDS":20408,"Ġadvisers":20409,"ĠRoot":20410,"Ġmethodology":20411,"ĠDale":20412,"Ġdeton":20413,"ĠKnowledge":20414,"sequently":20415,"Ġ121":20416,"Ġconnects":20417,"Cy":20418,"ĠDanger":20419,"Ġcontributors":20420,"ĠBent":20421,"Ġbrass":20422,"ĠGuns":20423,"into":20424,"ĠFortune":20425,"Ġbroker":20426,"balance":20427,"Ġlengths":20428,"Ġvic":20429,"Ġaveraging":20430,"Ġappropriately":20431,"ĠCamera":20432,"Ġsandwich":20433,"ĠCDC":20434,"Ġcoordinate":20435,"Ġnavig":20436,"Ġgoodness":20437,"laim":20438,"Ġbrake":20439,"Ġextremist":20440,"ĠWake":20441,"ĠMend":20442,"ĠTiny":20443,"ĠCOL":20444,"ĠRF":20445,"ĠDual":20446,"ĠWine":20447,"Case":20448,"Ġrefined":20449,"Ġlamp":20450,"Lead
":20451,"Ġbapt":20452,"ĠCarb":20453,"ĠSadd":20454,"ĠMinneapolis":20455,"PDF":20456,"Early":20457,"ĠHidden":20458,"Its":20459,"ĠTIME":20460,"Ġpap":20461,"Ġcommissioned":20462,"ĠFew":20463,"ĠColts":20464,"ĠBren":20465,"Ġbothered":20466,"Ġlikewise":20467,"Exper":20468,"ĠSchw":20469,"cry":20470,"nn":20471,"ĠMitch":20472,"imon":20473,"MG":20474,"bm":20475,"UMP":20476,"rays":20477,"Ġregistry":20478,"Ġ270":20479,"achine":20480,"rella":20481,"anting":20482,"00000":20483,"Ġruined":20484,"spot":20485,"Ġta":20486,"Ġmaximize":20487,"Ġinconven":20488,"Dead":20489,"Human":20490,"Enabled":20491,"ĠMarie":20492,"Ġchill":20493,"ĠParadise":20494,"Ġstarring":20495,"ĠLatino":20496,"ĠProtocol":20497,"ĠEVER":20498,"Ġsuppliers":20499,"message":20500,"ĠBrock":20501,"Ġserum":20502,"âĸĪâĸĪâĸĪâĸĪ":20503,"Ġencomp":20504,"Ġambition":20505,"uese":20506,"Ġarrows":20507,"Andrew":20508,"Ġantenna":20509,"Ġ1961":20510,"ĠBark":20511,"Ġbool":20512,"ãĤª":20513,"ĠStorage":20514,"Ġrailway":20515,"Ġtougher":20516,"ĠCad":20517,"Ġwashing":20518,"Py":20519,"']":20520,"embed":20521,"ĠMemphis":20522,"ackle":20523,"Ġfamously":20524,"ĠFortunately":20525,"ovies":20526,"Ġmindset":20527,"Ġsneak":20528,"ĠDh":20529,"RAW":20530,"ĠSimpson":20531,"Ġlivest":20532,"Ġlandmark":20533,"Ġcement":20534,"Low":20535,"Ġthrilled":20536,"ĠCourse":20537,"inel":20538,"Ġchuck":20539,"idate":20540,"global":20541,"Ġwhit":20542,"Ġ�":20543,"adays":20544,"ski":20545,"ĠSV":20546,"Ġviruses":20547,"306":20548,"ĠRespons":20549,"Ġtheaters":20550,"ĠBranch":20551,"ĠGeneva":20552,"ĠMK":20553,"Ġunbeliev":20554,"Ġcommunist":20555,"Original":20556,"ĠReceived":20557,"ĠTransfer":20558,"ĠArg":20559,"Input":20560,"ĠStrategy":20561,"Ġpalace":20562,"thening":20563,"Dri":20564,"Ġsentencing":20565,"umbnail":20566,"Ġpins":20567,"recy":20568,"Ġsiblings":20569,"Getting":20570,"ĠBU":20571,"ĠNorthwest":20572,"Ġprolonged":20573,"ĠSakura":20574,"Comb":20575,"ĠBour":20576,"Ġinadequate":20577,"ĠKash":20578,"Ġusername":20579,"ĠImprove":20580,"Ġbattling":20581,"ĠMAC":20582,"Ġcurriculum":20583,"Ġsoda":20584,"ĠCannon":20585,"Ġsensible":20586,"spons":20587,"December":20588,"Ġwicked":20589,"ĠPengu":20590,"Ġdictators":20591,"ĠHearts":20592,"ogyn":20593,"Ġsimilarities":20594,"ĠStats":20595,"Ġhollow":20596,"itations":20597,"\":[":20598,"Ġhover":20599,"ĠListen":20600,"sch":20601,"Sund":20602,"Ġcad":20603,"ĠParks":20604,"Ġlur":20605,"Ġhype":20606,"ĠLem":20607,"NAME":20608,"isure":20609,"Friday":20610,"Ġshoots":20611,"Ġcloses":20612,"Ġdb":20613,"ĠRidge":20614,"ĠDifferent":20615,"Ġreplies":20616,"ĠBroadway":20617,"opers":20618,"Ġintoler":20619,"ĠZeus":20620,"akespe":20621,"Ġproprietary":20622,"Ġrequesting":20623,"Ġcontrollers":20624,"ĠMIN":20625,"imedia":20626,"becca":20627,"Ġexpans":20628,"Ġoils":20629,"Bot":20630,"ĠChand":20631,"Ġprinter":20632,"Ġtopped":20633,"ĠPOL":20634,"ĠEarlier":20635,"Social":20636,"avin":20637,"Ġdecreases":20638,"ĠSeb":20639,"Ġspecifications":20640,"ĠBlast":20641,"ĠKurt":20642,"Ġfreel":20643,"Brown":20644,"Ġdilig":20645,"roe":20646,"ĠProblem":20647,"ĠQuad":20648,"Ġdecentral":20649,"ĠVector":20650,"anut":20651,"Ġplugins":20652,"ĠGregory":20653,"Ġfucked":20654,"elines":20655,"ĠAmbassador":20656,"take":20657,"Ġcleans":20658,"ongyang":20659,"Anonymous":20660,"stro":20661,"\"}":20662,"aline":20663,"ĠOdd":20664,"ĠEug":20665,"216":20666,"Ġboil":20667,"ĠPowers":20668,"Ġnurses":20669,"Obviously":20670,"ĠTechnical":20671,"Ġexceeded":20672,"ORS":20673,"Ġextremists":20674,"Ġtraces":20675,"expl":20676,"Ġcomr":20677,"ĠSach":20678,")/":20679,"Ġmasks":20680,"Ġsci":20681,"Bon":20682,"Ġregression":
20683,"wegian":20684,"Ġadvisor":20685,"itures":20686,"ĠVo":20687,"example":20688,"ĠInstruct":20689,"Ġsiege":20690,"Ġreductions":20691,"ptr":20692,"Ġstatutory":20693,"Ġremoves":20694,"Ġpuck":20695,"redits":20696,"Ġbee":20697,"Ġsalad":20698,"Ġpromotions":20699,"ĠJoshua":20700,"withstanding":20701,"ETH":20702,"ĠCha":20703,"imus":20704,"Ġexpenditure":20705,"aunting":20706,"Ġdelighted":20707,"Ġ155":20708,"beh":20709,"Ġcarpet":20710,"ĠSpart":20711,"Ġjungle":20712,"lists":20713,"Ġbullying":20714,"ĠNobel":20715,"ĠGlen":20716,"Ġreferenced":20717,"Ġintroduces":20718,"sein":20719,"Ġchopped":20720,"glass":20721,"ĠWrest":20722,"Ġneutrality":20723,"ĠâĻ":20724,"Ġinvestigator":20725,"Ġshelves":20726,"Ġunconstitutional":20727,"Ġreproduction":20728,"Ġmerchant":20729,"mia":20730,"Ġmetrics":20731,"Ġexplosives":20732,"ĠSonia":20733,"Ġbodily":20734,"Ġthickness":20735,"Ġpredominantly":20736,"ĠAbility":20737,"Ġmonitored":20738,"ICH":20739,"Ġ].":20740,"ĠMartinez":20741,"Ġvisibility":20742,"Ġqueries":20743,"Ġgenocide":20744,"ĠWarfare":20745,"Query":20746,"Ġstudios":20747,"Ġembry":20748,"Ġcorridor":20749,"Ġcleaned":20750,"complete":20751,"ĠMH":20752,"Ġenrollment":20753,"INGS":20754,"Ġimpacted":20755,"Ġdisastrous":20756,"ĠYun":20757,"ĠClaire":20758,"ĠBasically":20759,"yt":20760,"usterity":20761,"Ġindirectly":20762,"wik":20763,"Ġdod":20764,"ĠCarr":20765,"Ġamp":20766,"Ġprohibit":20767,"ĠInitial":20768,"ĠRd":20769,"iji":20770,"Ġeducate":20771,"corn":20772,"iott":20773,"ĠBeauty":20774,"Ġdetective":20775,"ĠConn":20776,"since":20777,"Ġstagger":20778,"Ġobese":20779,"Ġbree":20780,"ologic":20781,"isse":20782,"walker":20783,"Ġblades":20784,"Ġlawful":20785,"func":20786,"ĠBehind":20787,"Ġappetite":20788,"Ġ(*":20789,"Ġtennis":20790,"Ġoffspring":20791,"Ġjets":20792,"Ġstructured":20793,"Ġaforementioned":20794,"Nov":20795,"Ġscaling":20796,"fill":20797,"Ġstew":20798,"Ġcurb":20799,"ĠStephan":20800,"edIn":20801,"SF":20802,"obic":20803,"éŃĶ":20804,"oug":20805,"ĠMM":20806,"Ġgenetically":20807,"opez":20808,"136":20809,"Ġumb":20810,"ancers":20811,"Ġcohort":20812,"Ġmerchandise":20813,"Ġimposing":20814,"ĠLegislature":20815,"ĠArchive":20816,"ivia":20817,"ĠNaval":20818,"Ġoffences":20819,"Ġmiracle":20820,"Ġsnapped":20821,"Ġfoes":20822,"Ġextensively":20823,"ĠRaf":20824,"Ġcater":20825,"edience":20826,"Kit":20827,"ĠBin":20828,"Ġrecommends":20829,"ĠCities":20830,"Ġrigid":20831,"ĠREAD":20832,"ĠNoble":20833,"ĠTian":20834,"Ġcertificates":20835,"antis":20836,"oiler":20837,"ĠBuddhist":20838,"did":20839,"Ġsurveyed":20840,"Ġdownward":20841,"Ġprints":20842,"ĠMotion":20843,"ronics":20844,"ĠSans":20845,"ossibly":20846,"uctions":20847,"Ġcolonies":20848,"ĠDanish":20849,"unit":20850,"Ġspoil":20851,"Ġadvisory":20852,"berries":20853,"Plan":20854,"Ġspecification":20855,"ophers":20856,"ĠResource":20857,"Ġshirts":20858,"prisingly":20859,"communications":20860,"Ġtrivial":20861,"Ġmentioning":20862,"isexual":20863,"Ġsupplements":20864,"Ġsupervision":20865,"BP":20866,"vor":20867,"Ġwit":20868,"Ġcooldown":20869,"Ġplaintiff":20870,"ĠReviews":20871,"ĠSri":20872,"ĠMint":20873,"ĠSugar":20874,"Ġafterward":20875,"ĠPriest":20876,"ĠInvestment":20877,"ogene":20878,"ĠTaking":20879,"Ġstretching":20880,"Ġinflammation":20881,"ĠTehran":20882,"Ġlining":20883,"Ġfreezing":20884,"ĠEntity":20885,"Ġinspiring":20886,"special":20887,"price":20888,"Ġsue":20889,"ĠPorter":20890,"ounge":20891,"ETA":20892,"ĠDerek":20893,"ĠLuis":20894,"uo":20895,"ymph":20896,"Ġexterior":20897,"ihil":20898,"ĠAshley":20899,"inator":20900,"Ġnutrients":20901,"ĠThrones":20902,"Ġfinances":20903,"ĠInspect":20904,"Ġspecially
":20905,"ĠRequired":20906,"ĠPTS":20907,"ĠViolence":20908,"ointed":20909,"shots":20910,"Ġexcerpt":20911,"coon":20912,"INS":20913,"ĠGri":20914,"Ġrecognised":20915,"Week":20916,"Young":20917,"Ġvom":20918,"isle":20919,"ĠCurry":20920,"ĠBuddh":20921,"Ġnotebook":20922,"Ġdurable":20923,"/?":20924,"ĠGad":20925,"ĠPupp":20926,"Ġforgive":20927,"park":20928,"Ġpersonalities":20929,"analysis":20930,"clamation":20931,"Ġelevator":20932,"Ġwarehouse":20933,"ĠRole":20934,"unn":20935,"Ġillustration":20936,"ĠScan":20937,"Ġatmospheric":20938,"Import":20939,"ANC":20940,"ricted":20941,"fu":20942,"010":20943,"Ġarche":20944,"Ġrewarded":20945,"akespeare":20946,"Ġinternally":20947,"ĠRBI":20948,"alker":20949,"Ġelephant":20950,"owitz":20951,"ĠPizza":20952,"Ġbipartisan":20953,"és":20954,"Ġslowed":20955,"ĠStark":20956,"Ġoverride":20957,"OUS":20958,"Ġ320":20959,"undreds":20960,"ĠDeck":20961,"ĠCensus":20962,"bee":20963,"146":20964,"otor":20965,"Ġip":20966,"Ġub":20967,"ocations":20968,"ĠButton":20969,"rice":20970,"Ġcripp":20971,"fff":20972,"Ġoriginated":20973,"Ġoverwhelmed":20974,"appa":20975,"Ġforemost":20976,"âĢij":20977,"ĠLEG":20978,"release":20979,"eatured":20980,"atches":20981,"Ġreps":20982,"Ġlending":20983,"ĠReference":20984,"ĠClient":20985,"165":20986,"venth":20987,"Complete":20988,"ĠPatrol":20989,"Ġsworn":20990,"cam":20991,"Ġshuttle":20992,"ĠRalph":20993,"Ġhometown":20994,"-,":20995,"onal":20996,"ĠBP":20997,"åı":20998,"Ġpersuade":20999,"ĠAlexand":21000,"Ġcombines":21001,"Ġvivid":21002,"ĠLag":21003,"Ġencoding":21004,"Ġsalvation":21005,"wen":21006,"ĠRecovery":21007,"iya":21008,"University":21009,"ĠBiden":21010,"Ġbudgets":21011,"ĠTexans":21012,"fits":21013,"Ġhonored":21014,"Ġpython":21015,"TD":21016,"###":21017,"clone":21018,"Ġblink":21019,"ĠLiquid":21020,"Ġunemployed":21021,"Ġclashes":21022,"ĠCounsel":21023,"Ġdirecting":21024,"Ġpunct":21025,"ĠFalcons":21026,"Ġshark":21027,"ĠDamascus":21028,"Ġjeans":21029,"Ġembark":21030,"Ġseize":21031,"Ġupwards":21032,"280":21033,"ĠEz":21034,"ĠAnything":21035,"Ġexotic":21036,"lower":21037,"ĠCreator":21038,"ĠUm":21039,"Ġsuburbs":21040,"berger":21041,"ĠWend":21042,"Ġmint":21043,"ĠXX":21044,"ĠDro":21045,"Ġsuffers":21046,"Ġherb":21047,"tree":21048,"Ġfragile":21049,"Ġflooded":21050,"ĠAlcohol":21051,"olean":21052,"nyder":21053,"ĠKO":21054,"Fram":21055,"Ġ136":21056,"Ġowed":21057,"ĠMelee":21058,"ĠHash":21059,"Ġwhisk":21060,"Ġsudo":21061,"rr":21062,"Quick":21063,"appro":21064,"Ġii":21065,"ĠExamples":21066,"hee":21067,"Ġpromotes":21068,"perature":21069,"kar":21070,"ĠHonor":21071,"Ġsodium":21072,"ĠLif":21073,"rosso":21074,"intendent":21075,"Ġcorrespondent":21076,"Found":21077,"secret":21078,"Ġidentifies":21079,"agne":21080,"Ġlou":21081,"ĠPP":21082,"Ġcoincidence":21083,"move":21084,"Ġmilitia":21085,"Ġinfiltr":21086,"ĠPrimary":21087,"Ġpitching":21088,"ĠIb":21089,"ĠGOOD":21090,"ãĤ¸":21091,"ĠWizards":21092,"iral":21093,"ĠVenus":21094,"RR":21095,"ĠâĢķ":21096,"ĠCasey":21097,"Ġsadly":21098,"Ġadmire":21099,"Ġembarrassed":21100,"cb":21101,"Mel":21102,"Ġtubes":21103,"Ġbeautifully":21104,"ĠQueensland":21105,"Below":21106,"rez":21107,"quet":21108,"pleasant":21109,"Ġ«":21110,"Camp":21111,"Ġdecisive":21112,"1998":21113,"ĠLamb":21114,"utton":21115,"hn":21116,"ĠJagu":21117,"aunder":21118,"ĠCord":21119,"Ġclerk":21120,"Ġcaffe":21121,"Ġwiped":21122,"Ġreim":21123,"ĠMountains":21124,"Ġimprisoned":21125,"Ġdevelops":21126,"ĠPra":21127,"Ġmodeling":21128,"Anyone":21129,"ancel":21130,"ĠSit":21131,"Ġshields":21132,"Ġlawn":21133,"Ġcardiovascular":21134,"Ġdemonstrating":21135,"Ġparse":21136,"ĠIsraelis":21137,"Ġeuros":21138,"1
43":21139,"Ġglorious":21140,"inski":21141,"ecd":21142,"Ġconditioning":21143,"Ġhelpless":21144,"Ġmicrosc":21145,"ĠHarbor":21146,"Ġstakes":21147,"Ġ260":21148,"Ġunequ":21149,"ĠFloyd":21150,"Ġdamp":21151,"Ġapparatus":21152,"ĠLaws":21153,"Ġcounters":21154,"Ġinduce":21155,"atable":21156,"ĠAhmed":21157,"Ġslam":21158,"November":21159,"Ġpersist":21160,"Ġimminent":21161,"án":21162,"Ġshred":21163,"Ġphases":21164,"ĠEdmonton":21165,"ĠArmstrong":21166,"ĠMeet":21167,"ĠKitty":21168,"ÑĢ":21169,"circ":21170,"ĠAdult":21171,"Ġarose":21172,"ĠXen":21173,"Dan":21174,"gow":21175,"Ġsuperf":21176,"ĠAdmir":21177,"Ġendure":21178,"Ġkeyword":21179,"yrus":21180,"Ġyarn":21181,"Ġpathway":21182,"ĠHopkins":21183,"midt":21184,"Ġcensorship":21185,"dependent":21186,"Ġinstructor":21187,"Sources":21188,"Ġtoe":21189,"Ġballoon":21190,"Nob":21191,"Ġswear":21192,"ĠCastro":21193,"Ġgloss":21194,"ĠKavanaugh":21195,"Ġremarkably":21196,"Photos":21197,"ĠNom":21198,"ĠSoutheast":21199,"yers":21200,"Ġvalidation":21201,"Ġcannon":21202,"ĠVictory":21203,"ĠPierre":21204,"Ġcautious":21205,"Audio":21206,"Ġfetch":21207,"ĠGift":21208,"ĠHyp":21209,"Ġremedy":21210,"ZE":21211,"Ġscent":21212,"Ġbeard":21213,"ĠRut":21214,"-\"":21215,"Ġpatents":21216,"Hy":21217,"Ġunjust":21218,"Ġpotato":21219,"Ġforthcoming":21220,"Ġchef":21221,"ĠRift":21222,"affe":21223,"ĠROM":21224,"ĠLaunch":21225,"Ġpads":21226,"ĠNeo":21227,"Ġonset":21228,"Ġsqueeze":21229,"safe":21230,"Ġprefix":21231,"ĠTM":21232,"ĠNearly":21233,"ĠClinical":21234,"ĠMental":21235,"otiation":21236,"ĠUnic":21237,"antry":21238,"ĠCir":21239,"Ġepit":21240,"æ":21241,"Ġextracted":21242,"versely":21243,"riad":21244,"Ġstrains":21245,"Ġtops":21246,"Ġpoem":21247,"ĠRandy":21248,"ĠMaple":21249,"THER":21250,"upiter":21251,"ĠSSD":21252,"ļé":21253,"Ġuncon":21254,"pering":21255,"Ġslept":21256,"iners":21257,"Ġunderwater":21258,"ĠEvidence":21259,"gone":21260,"205":21261,"Ġhistorians":21262,"Ġsynthesis":21263,"Ġfrog":21264,"basketball":21265,"Ġvibrant":21266,"Ġsubord":21267,"Ġ365":21268,"ĠDial":21269,"Ġcooperate":21270,"HAHA":21271,"Ġgreeted":21272,"158":21273,"Ġjazz":21274,"Ġintox":21275,"ĠWalking":21276,"Ġsupervisor":21277,"ĠFusion":21278,"ĠMercedes":21279,"send":21280,"Ham":21281,"sd":21282,"nl":21283,"Ġtours":21284,"ĠFIFA":21285,"Ġculp":21286,"gd":21287,"304":21288,"Ġpleas":21289,"Ġillustrates":21290,"ĠColombia":21291,"Ġhighlighting":21292,"ĠSummary":21293,"Ġexposing":21294,"ĠDru":21295,"Ġirony":21296,"ritional":21297,"ĠCarroll":21298,"ĠEllis":21299,"Pict":21300,"ĠRapt":21301,"Ġadapter":21302,"Ġunm":21303,"Ġcorpse":21304,"Ġcelebrities":21305,"Den":21306,"atum":21307,"ĠApocalypse":21308,"ĠWag":21309,"lining":21310,"Ġhormones":21311,"Rub":21312,"ĠXi":21313,"ĠVaults":21314,"208":21315,"alkyrie":21316,"inosaur":21317,"Ġfeeds":21318,"vity":21319,"Ġdefeating":21320,"Wait":21321,"Ġemphasize":21322,"ĠSteelers":21323,"yrinth":21324,"leys":21325,"ĠWhenever":21326,"Currently":21327,"ĠClock":21328,"Ġcollectively":21329,"anyon":21330,"ĠJP":21331,"Ġmentality":21332,"Ġdownloads":21333,"Ġsurroundings":21334,"ĠBarnes":21335,"Ġflagship":21336,"Ġindicators":21337,"Ġgrapp":21338,"January":21339,"ĠElemental":21340,"ĠAthena":21341,"ibal":21342,"Ġsights":21343,"Ġcapita":21344,"ĠTreaty":21345,"Ġvoiced":21346,"ĠGaz":21347,"lette":21348,"Ġya":21349,"Ġexpired":21350,"Legend":21351,"Hot":21352,"nature":21353,"Ġunstable":21354,"Ġ280":21355,"ú":21356,"Comment":21357,"ALE":21358,"Ġquests":21359,"Ġhandler":21360,"nis":21361,"Ġversatile":21362,"Ġconceal":21363,"engeance":21364,"ĠInteractive":21365,"Ġobsessed":21366,"ĠDogs":21367,"Ġcracked":21368,"Sound":21
369,"sv":21370,"ĠDylan":21371,"roads":21372,"fx":21373,"ĠCatholics":21374,"ĠHag":21375,"Ġslammed":21376,"Ġglowing":21377,"sale":21378,"Ġtissues":21379,"ĠChi":21380,"nee":21381,"Ġcher":21382,"sic":21383,"urrection":21384,"Ġbacon":21385,"ulatory":21386,").\"":21387,"Ġirregular":21388,"FORM":21389,"assed":21390,"Ġintentional":21391,"Ġcompensate":21392,"ĠSpeaking":21393,"ĠSets":21394,"153":21395,"Ġconventions":21396,"bands":21397,"emade":21398,"Ġecc":21399,"ĠWinston":21400,"ĠAssassin":21401,"ĠBelgian":21402,"Ġdependence":21403,"Ġniche":21404,"Ġbark":21405,"ĠJazz":21406,"Ġdisadvantage":21407,"Ġgasoline":21408,"Ġ165":21409,"çļĦ":21410,"essa":21411,"module":21412,"angular":21413,"OY":21414,"ĠTreatment":21415,"itas":21416,"olation":21417,"ĠArnold":21418,"Ġfeud":21419,"ĠNest":21420,"Ġtheatre":21421,"ewater":21422,"Ġminors":21423,"olicy":21424,"ĠHaven":21425,"division":21426,"Ġtrunk":21427,"Far":21428,"ĠPull":21429,"Ġcapturing":21430,"Ġ1800":21431,"ĠTeen":21432,"Ġexempl":21433,"Ġclinics":21434,"ĠBurg":21435,"Ġsubstit":21436,"Ġpayload":21437,"ĠLav":21438,"ĠTroy":21439,"ĠWitness":21440,"Ġfragments":21441,"Ġpasswords":21442,"Ġgospel":21443,"ĠGin":21444,"Ġtenants":21445,"olith":21446,"Six":21447,"Previous":21448,"ĠAges":21449,"ĠDarwin":21450,"Ġblat":21451,"Ġempathy":21452,"smith":21453,"bag":21454,"ĠEcho":21455,"ĠCamb":21456,"ĠMadd":21457,"ĠBoo":21458,"Ġrede":21459,"ĠBurning":21460,"Ġsmoothly":21461,"ĠAdrian":21462,"ĠVampire":21463,"ĠMonsters":21464,"steam":21465,"Style":21466,"Ma":21467,"rea":21468,"ĠDwar":21469,"alyst":21470,"ursor":21471,"Ġelimination":21472,"Ġcrypto":21473,"cht":21474,"ĠEternal":21475,"âĢ¦]":21476,"ĠSorce":21477,"Ill":21478,"NER":21479,"Ġuh":21480,"Conclusion":21481,"wage":21482,"Ġrespir":21483,"Ġreminis":21484,"hetical":21485,"Ġgy":21486,"Ġutilized":21487,"icidal":21488,"Ġ1900":21489,"Ġhunters":21490,"ĠSwan":21491,"ĠReact":21492,"Ġvisitor":21493,"ĠThanksgiving":21494,"308":21495,"Posts":21496,"Ġhips":21497,"1997":21498,"omers":21499,"Ġknocking":21500,"ĠVehicle":21501,"Ġtil":21502,"Ġ138":21503,"Ġmi":21504,"ĠInvestigation":21505,"ĠKenya":21506,"Ġcasino":21507,"Ġmotives":21508,"Ġregain":21509,"rex":21510,"Ġweekends":21511,"Ġstabbed":21512,"boro":21513,"Ġexploited":21514,"ĠHAVE":21515,"ĠTelevision":21516,"cock":21517,"Ġpreparations":21518,"Ġendeav":21519,"ĠRemote":21520,"ĠMaker":21521,"ĠProdu":21522,"ĠEvan":21523,"Ġinformational":21524,"ĠLouisville":21525,"154":21526,"ĠDreams":21527,"Ġplots":21528,"ĠRunner":21529,"Ġhurting":21530,"Ġacademy":21531,"ĠMontgomery":21532,"nm":21533,"ĠLanc":21534,"ĠAlz":21535,"210":21536,"elong":21537,"Ġretailer":21538,"Ġarising":21539,"Ġrebellion":21540,"Ġblonde":21541,"played":21542,"Ġinstrumental":21543,"Cross":21544,"Ġretention":21545,"Ġtherapeutic":21546,"Ġseas":21547,"Ġinfantry":21548,"ĠClint":21549,"Ġprompting":21550,"Ġbitch":21551,"Ġstems":21552,"ĠKra":21553,"Ġthesis":21554,"ĠBog":21555,"rued":21556,"Ġkings":21557,"Ġclay":21558,"ificent":21559,"ĠYES":21560,"ĠThing":21561,"ĠCubs":21562,"veyard":21563,"elsh":21564,"inarily":21565,"ĠEy":21566,"ĠRolling":21567,"Ġevolving":21568,"India":21569,"Ġrecognizes":21570,"Ġgraduation":21571,"isers":21572,"Ġfertility":21573,"ĠMilan":21574,"Command":21575,"Ġboxing":21576,"Ġ1943":21577,"Ġgluten":21578,"ĠEmir":21579,"Ġidol":21580,"Ġconceived":21581,"ĠCreation":21582,"Merit":21583,"uddy":21584,"ussions":21585,"ĠLieutenant":21586,"ietal":21587,"Ġunchanged":21588,"ĠScale":21589,"ĠCrimea":21590,"balls":21591,"atorial":21592,"Ġdepths":21593,"Ġempirical":21594,"Ġtransm":21595,"Ġunsafe":21596,"missible":21597,"comfort":21598,
"156":21599,"Ġmechanic":21600,"002":21601,"lins":21602,"Ġsmoked":21603,"Pos":21604,"Ġslowing":21605,"Ġlav":21606,"Texas":21607,"Ġcheating":21608,"ĠMetropolitan":21609,"ethyl":21610,"Ġdiscovering":21611,"asse":21612,"Ġpencil":21613,"ĠPyongyang":21614,"Ġcloset":21615,"ĠSheet":21616,"ĠEntry":21617,"oustic":21618,"Ġmyst":21619,"erate":21620,"ariat":21621,"Ġminerals":21622,"Ġmusician":21623,"ĠPul":21624,"ĠMaz":21625,"249":21626,"Ġpermissions":21627,"Ġiv":21628,"enary":21629,"ickers":21630,"ĠBing":21631,"hea":21632,"enable":21633,"Ġgriev":21634,"Ġasserted":21635,"ĠColonel":21636,"Ġaffidav":21637,"wo":21638,"Ġseated":21639,"ĠRide":21640,"Ġpaintings":21641,"ĠPix":21642,"Ġ137":21643,"ishi":21644,"umbai":21645,"gotten":21646,"ĠEarl":21647,"Ġinning":21648,"Ġcensus":21649,"Ġtravelled":21650,"ĠConsult":21651,"185":21652,"bind":21653,"Ġsimplicity":21654,"Ġoverlooked":21655,"ĠHelpful":21656,"Ġmonkey":21657,"Ġoverwhelmingly":21658,"Blood":21659,"ĠFlint":21660,"ĠJama":21661,"ĠPresent":21662,"ĠRage":21663,"ĠTA":21664,"ptive":21665,"Ġturnout":21666,"wald":21667,"ĠDolphins":21668,"ĠVPN":21669,"Ġonion":21670,"Ġcrafting":21671,"mma":21672,"ĠMercury":21673,"Ġarrange":21674,"Ġalerts":21675,"ĠOT":21676,"zbollah":21677,"Ġgases":21678,"ĠRichardson":21679,"sal":21680,"lar":21681,"Ġfrost":21682,"Ġlowering":21683,"Ġacclaim":21684,"Ġstartups":21685,"ĠGain":21686,"essment":21687,"Ġguardian":21688,"人":21689,"ĠPie":21690,"ĠLinks":21691,"Ġmerits":21692,"Ġawake":21693,"Ġparental":21694,"Ġexceeds":21695,"Ġidle":21696,"ĠPilot":21697,"ĠeBay":21698,"ĠAccept":21699,"ipeg":21700,"Cam":21701,"ĠKot":21702,"Ġtraders":21703,"olitics":21704,"unker":21705,"ĠPale":21706,"osi":21707,"anmar":21708,"Ġ1947":21709,"ĠFell":21710,"estial":21711,"itating":21712,"GF":21713,"ĠSr":21714,"ifted":21715,"Ġconnector":21716,"ĠBone":21717,"illes":21718,"260":21719,"hma":21720,"Ġoverlap":21721,"ĠGitHub":21722,"Ġcleaner":21723,"ĠBaptist":21724,"ĠWAS":21725,"Ġlungs":21726,"Ñģ":21727,"ĠBUT":21728,"Ġcite":21729,"Ġpitched":21730,"reatment":21731,"Ġtrophies":21732,"ĠNu":21733,"386":21734,"ĠPride":21735,"Ġattendees":21736,"[]":21737,"179":21738,"Ġspatial":21739,"Ġprizes":21740,"ĠReligion":21741,"Ġshowcase":21742,"ĠCategory":21743,"vidia":21744,"Target":21745,"Property":21746,"?,":21747,"Ġfusion":21748,"pie":21749,"ĠUCLA":21750,"Ġsoundtrack":21751,"Ġprincess":21752,"ĠCaval":21753,"should":21754,"Ġlimbs":21755,"Background":21756,"Ġlonely":21757,"Ġcores":21758,"ĠTail":21759,"sheet":21760,"Ġ132":21761,"Ra":21762,"ãĤ«":21763,"ĠBolt":21764,"Ġbooked":21765,"Ġadminister":21766,"Ġequals":21767,"wy":21768,"Ġobserving":21769,"ĠBaron":21770,"ĠAdobe":21771,"Ġvirgin":21772,"ĠSocialist":21773,"Move":21774,"ghazi":21775,"ĠLinda":21776,"212":21777,"Ġbrewing":21778,"Ġmerchants":21779,"burse":21780,"Ġdivor":21781,"Ġmetals":21782,"ĠNer":21783,"Ġsums":21784,"ĠEnemy":21785,"Ġenvision":21786,"Ġgranting":21787,"ĠHoney":21788,"ĠSkyrim":21789,"Ġsocio":21790,"graded":21791,"Ġselective":21792,"WASHINGTON":21793,"Ġ1948":21794,"ĠSirius":21795,"ĠGross":21796,"activity":21797,"ĠIvan":21798,"Ġfurious":21799,"BSD":21800,"ĠPrevious":21801,"Ġresponsive":21802,"Ġcharitable":21803,"Ġleaning":21804,"ĠPew":21805,"Ġviolates":21806,"\\\\\\\\\\\\\\\\":21807,"ĠComing":21808,"wire":21809,"Ġpoet":21810,"Ġresolutions":21811,"command":21812,"ĠPortuguese":21813,"Ġnickname":21814,"Ġdeaf":21815,"February":21816,"Ġrecognise":21817,"Ġentirety":21818,"Ġseasonal":21819,"placed":21820,"ĠTelegraph":21821,"Ġmicrophone":21822,"ouring":21823,"Ġgrains":21824,"Ġgoverned":21825,"Ġpostp":21826,"ĠWaters":21827,"inement":21828,
"Ġundocumented":21829,"ĠComcast":21830,"Ġfox":21831,"Ġassaults":21832,"reon":21833,"many":21834,"ĠJenkins":21835,"ĠAnyway":21836,"Ġassessments":21837,"Ġdowns":21838,"ĠMouse":21839,"Ġsuperb":21840,"kt":21841,"ĠDow":21842,"Ġtaxation":21843,"401":21844,"Ġsmiles":21845,"Ġundertaken":21846,"Ġexh":21847,"Ġenthusiastic":21848,"Ġtwent":21849,"Ġgovernmental":21850,"Ġautonomy":21851,"ĠTechnologies":21852,"ĠChain":21853,"Ġprevalent":21854,"fb":21855,"Ġnicotine":21856,"ogram":21857,"job":21858,"Ġawaiting":21859,"ĠMenu":21860,"Ġdeputies":21861,"kov":21862,"ishops":21863,"Button":21864,"ĠShanghai":21865,"Ġdiesel":21866,"ĠDuck":21867,"Ryan":21868,"ĠPCs":21869,"NF":21870,"jury":21871,"ente":21872,"Ġinaccurate":21873,"eddy":21874,"Whatever":21875,"Ġshowc":21876,"ĠNad":21877,"odus":21878,"etr":21879,"Ġplaintiffs":21880,"ĠWOR":21881,"ĠAssange":21882,"Ġprivat":21883,"Ġpremiums":21884,"Ġtam":21885,"URL":21886,"Ġelites":21887,"ĠRanger":21888,"ottenham":21889,"ĠHoff":21890,"ĠAthens":21891,"Ġdefinite":21892,"Ġsighed":21893,"Ġevenly":21894,"211":21895,"ĠAmber":21896,"akia":21897,"Ġmailing":21898,"Ġcrashing":21899,"ĠConfederate":21900,"rugged":21901,"Wal":21902,"ĠDepths":21903,"Ġjuvenile":21904,"Ġreactor":21905,"Introduction":21906,"ĠDeluxe":21907,"1995":21908,"ĠSanchez":21909,"ĠMead":21910,"ivable":21911,":-":21912,"ĠPlanning":21913,"ĠTrap":21914,"quin":21915,"ĠProtect":21916,"vered":21917,"Information":21918,"Ġkidney":21919,"innamon":21920,"las":21921,"Ġpolicing":21922,"Ġtolerate":21923,"ĠQi":21924,"Ġbiased":21925,"Fort":21926,"ĠKi":21927,"save":21928,"Ġprivileged":21929,"Ġbeasts":21930,"ĠGlas":21931,"ĠCinem":21932,"Ġcomeback":21933,"Sunday":21934,"Ġextinction":21935,"hops":21936,"Ġtransmit":21937,"Ġdoubles":21938,"ĠFlat":21939,"167":21940,"Ġdisputed":21941,"Ġinjustice":21942,"foo":21943,"Vict":21944,"roleum":21945,"ĠJulie":21946,"Context":21947,"ĠRarity":21948,"issue":21949,"Component":21950,"Ġcounseling":21951,"anne":21952,"dark":21953,"Ġobjections":21954,"uilt":21955,"Ġgast":21956,"Ġplac":21957,"Ġunused":21958,"ãĥĩ":21959,"ĠTrial":21960,"ĠJas":21961,"hedral":21962,"obb":21963,"Ġtemporal":21964,"ĠPRO":21965,"ĠNW":21966,"ĠAnniversary":21967,"Large":21968,"Ġtherm":21969,"Ġdavid":21970,"Ġsystemic":21971,"ĠShir":21972,"mut":21973,"ĠNept":21974,"address":21975,"Ġscanning":21976,"Ġunderstandable":21977,"Ġcanvas":21978,"Cat":21979,"ĠZoo":21980,"Ġangels":21981,"LO":21982,"ĠStatement":21983,"ĠSig":21984,"ovable":21985,"ĠAway":21986,"sharing":21987,"ocrats":21988,"stated":21989,"Ġweighing":21990,"Nor":21991,"wild":21992,"Bey":21993,"Ġastonishing":21994,"ĠReynolds":21995,"Ġopener":21996,"Ġtrainer":21997,"Ġsurgical":21998,"pn":21999,"Ġadjusting":22000,"wheel":22001,"Ġfrown":22002,"ervative":22003,"Ġsuspend":22004,"Within":22005,"tein":22006,"Ġobstacle":22007,"Ġliberties":22008,"ymes":22009,"Ġuranium":22010,"ansom":22011,"anol":22012,"uba":22013,"ĠLoss":22014,"Ġarous":22015,"ĠHenderson":22016,"Wow":22017,"spl":22018,"cur":22019,"ĠÂŃ":22020,"Ġtheirs":22021,"Damage":22022,"Ġdownloading":22023,"Ġdiscern":22024,"ĠSto":22025,"ĠFla":22026,"Ġhath":22027,"ĠAj":22028,"Ġunpleasant":22029,"European":22030,"expensive":22031,"Ġscreenshot":22032,"ĠUV":22033,"Ġallied":22034,"ĠPersian":22035,"Ġmonopoly":22036,"Ġatom":22037,"ĠRedskins":22038,"\"><":22039,"Ġcancell":22040,"Ġcinema":22041,"131":22042,"fair":22043,"ĠAlfred":22044,"Ġduck":22045,"args":22046,"223":22047,"ĠISI":22048,"Ġsignaling":22049,"inar":22050,"Ġlaughs":22051,"Ġforwards":22052,"Ġreckless":22053,"Ġlisteners":22054,"ativity":22055,"Ġvastly":22056,"nant":22057,"Less":22058,"ĠHun
ting":22059,"ĠScientific":22060,"ITED":22061,"Ġknight":22062,"ĠHTC":22063,"usa":22064,"tmp":22065,"Ġrude":22066,"ĠLegendary":22067,"Ġarises":22068,"Bad":22069,"ĠClaim":22070,"peg":22071,"Ġrealities":22072,"Think":22073,"Ġ°":22074,"Ġrode":22075,"Ġstrive":22076,"Ġanecd":22077,"Ġshorts":22078,"Ġhypothes":22079,"Ġcoordinated":22080,"ĠGandhi":22081,"ĠFPS":22082,"RED":22083,"Ġsusceptible":22084,"Ġshrink":22085,"ĠChart":22086,"Help":22087,"Ġion":22088,"deep":22089,"ribes":22090,"ĠKai":22091,"ĠCustomer":22092,"Summary":22093,"Ġcough":22094,"wife":22095,"Ġlend":22096,"Ġpositioning":22097,"Ġlottery":22098,"ĠCanyon":22099,"Ġfade":22100,"Ġbronze":22101,"ĠKenny":22102,"Ġboasts":22103,"ĠEnhanced":22104,"record":22105,"Ġemergence":22106,"Ġakin":22107,"ĠBert":22108,"itous":22109,"âĸij":22110,"Ġstip":22111,"Ġexchanged":22112,"omore":22113,"alsh":22114,"Ġreservoir":22115,"Ġstandpoint":22116,"WM":22117,"Ġinitiate":22118,"Ġdecay":22119,"Ġbrewery":22120,"Ġterribly":22121,"Ġmortal":22122,"levard":22123,"Ġrevis":22124,"NI":22125,"elo":22126,"Ġconfess":22127,"ĠMSNBC":22128,"Ġsubmissions":22129,"Controller":22130,"Ġ202":22131,"ĠRuth":22132,"});":22133,"ĠAzure":22134,"Ġ.\"":22135,"206":22136,"ĠMarketing":22137,"Ġlaund":22138,"iencies":22139,"Ġrenowned":22140,"ĠTrou":22141,"ĠNGO":22142,"blems":22143,"Ġterrified":22144,"Ġwarns":22145,"Ġpert":22146,"Ġunsure":22147,"480":22148,"alez":22149,"ultz":22150,"ĠOutside":22151,"Ġstyl":22152,"ĠUnderground":22153,"Ġpanc":22154,"Ġdictionary":22155,"Ġfoe":22156,"riminal":22157,"ĠNorwegian":22158,"Ġjailed":22159,"Ġmaternal":22160,"ée":22161,"ĠLucy":22162,"cop":22163,"Cho":22164,"Ġunsigned":22165,"ĠZelda":22166,"ĠInsider":22167,"ĠContinued":22168,"Ġ133":22169,"ĠNaruto":22170,"ĠMajority":22171,"169":22172,"ĠWo":22173,"ãĤĵ":22174,"Ġpastor":22175,"Ġinformal":22176,"н":22177,"anthrop":22178,"join":22179,"ãģĹ":22180,"itational":22181,"NP":22182,"ĠWriting":22183,"fn":22184,"ĠBever":22185,"195":22186,"Ġyelling":22187,"Ġdrastically":22188,"Ġeject":22189,"Ġneut":22190,"Ġthrive":22191,"ĠFrequ":22192,"oux":22193,"Ġpossesses":22194,"ĠSenators":22195,"ĠDES":22196,"ĠShakespeare":22197,"ĠFranco":22198,"ĠLB":22199,"uchi":22200,"Ġincarn":22201,"Ġfounders":22202,"Function":22203,"Ġbrightness":22204,"ĠBT":22205,"Ġwhale":22206,"ĠTheater":22207,"mass":22208,"ĠDoll":22209,"Something":22210,"Ġechoed":22211,"ĠHex":22212,"crit":22213,"afia":22214,"Ġgoddess":22215,"Ġeleven":22216,"ĠPreview":22217,"ĠAurora":22218,"Ġ401":22219,"ulsive":22220,"ĠLogan":22221,"inburgh":22222,"ĠCenters":22223,"ĠONLY":22224,"ĠAid":22225,"Ġparadox":22226,"Ġhurd":22227,"ĠLC":22228,"Due":22229,"court":22230,"Ġoffended":22231,"Ġevaluating":22232,"ĠMatthews":22233,"Ġtomb":22234,"Ġpayroll":22235,"Ġextraction":22236,"ĠHands":22237,"ifi":22238,"Ġsupernatural":22239,"ĠCOMM":22240,"]=":22241,"dogs":22242,"Ġ512":22243,"ĠMeeting":22244,"Richard":22245,"ĠMaximum":22246,"Ġideals":22247,"Things":22248,"mand":22249,"ĠRegardless":22250,"Ġhumili":22251,"buffer":22252,"Little":22253,"ĠDani":22254,"ĠNak":22255,"Ġliberation":22256,"ĠAbe":22257,"ĠOL":22258,"Ġstuffed":22259,"aca":22260,"inda":22261,"raphic":22262,"Ġmosqu":22263,"Ġcampaigning":22264,"Ġoccupy":22265,"Squ":22266,"rina":22267,"ĠWel":22268,"ĠVS":22269,"Ġphysic":22270,"Ġpuls":22271,"rint":22272,"oaded":22273,"ETF":22274,"ĠArchives":22275,"Ġvenues":22276,"hner":22277,"ĠTurbo":22278,"Ġlust":22279,"Ġappealed":22280,"quez":22281,"ilib":22282,"ĠTimothy":22283,"Ġomn":22284,"dro":22285,"Ġobsession":22286,"ĠSavage":22287,"1996":22288,"Global":22289,"Jes":22290,"214":22291,"Ġsliding":22292,"Ġdisappro":2
2293,"ĠMagical":22294,"Ġvoluntarily":22295,"gb":22296,"aney":22297,"Ġprophet":22298,"ĠRein":22299,"ĠJulia":22300,"ĠWorth":22301,"aurus":22302,"Ġbounds":22303,"ieu":22304,")))":22305,"Ġcrore":22306,"ĠCitizen":22307,"Sky":22308,"Ġcolumnist":22309,"Ġseekers":22310,"ondo":22311,"ISA":22312,"ĠLength":22313,"Ġnostalg":22314,"Ġnewcom":22315,"Ġdetrim":22316,"entric":22317,"375":22318,"ĠGE":22319,"Ġautop":22320,"Ġacademics":22321,"AppData":22322,"ĠShen":22323,"Ġidiot":22324,"ĠTransit":22325,"Ġteaspoon":22326,"Wil":22327,"KO":22328,"ĠComedy":22329,">,":22330,"Ġpopulated":22331,"WD":22332,"Ġpigs":22333,"ĠOculus":22334,"Ġsympathetic":22335,"Ġmarathon":22336,"198":22337,"Ġseizure":22338,"sided":22339,"Ġdop":22340,"irtual":22341,"Land":22342,"ĠFloor":22343,"osaurs":22344,"...]":22345,"Ġlos":22346,"Ġsubsidiary":22347,"EY":22348,"ĠParts":22349,"ĠStef":22350,"ĠJudiciary":22351,"Ġ134":22352,"Ġmirrors":22353,"Ġket":22354,"times":22355,"Ġneurolog":22356,"Ġcav":22357,"ĠGuest":22358,"Ġtumor":22359,"scill":22360,"ĠLloyd":22361,"Est":22362,"Ġclearer":22363,"Ġstereotypes":22364,"Ġdur":22365,"nothing":22366,"Reddit":22367,"Ġnegotiated":22368,"------------------------":22369,"235":22370,"Ġflown":22371,"ĠSeoul":22372,"ĠResident":22373,"ĠSCH":22374,"Ġdisappearance":22375,"ĠVince":22376,"grown":22377,"Ġgrabs":22378,"ril":22379,"ĠInfinite":22380,"ĠTwenty":22381,"Ġpedestrian":22382,"Ġjersey":22383,"ĠFur":22384,"ĠInfinity":22385,"ĠElliott":22386,"Ġmentor":22387,"Ġmorally":22388,"Ġobey":22389,"secure":22390,"iffe":22391,"Ġantibiotics":22392,"angled":22393,"ĠFreeman":22394,"ĠIntroduction":22395,"Jun":22396,"Ġmarsh":22397,"icans":22398,"ĠEVENTS":22399,"ochond":22400,"Wall":22401,"iculty":22402,"Ġmisdemeanor":22403,"Ġly":22404,"Thomas":22405,"ĠResolution":22406,"Ġanimations":22407,"ĠDry":22408,"Ġintercourse":22409,"ĠNewcastle":22410,"ĠHog":22411,"ĠEquipment":22412,"177":22413,"Ġterritorial":22414,"Ġarchives":22415,"203":22416,"Filter":22417,"ĠMunich":22418,"Ġcommanded":22419,"ĠWand":22420,"Ġpitches":22421,"ĠCroat":22422,"Ġratios":22423,"ĠMits":22424,"Ġaccumulated":22425,"ĠSpecifically":22426,"Ġgentleman":22427,"acerb":22428,"Ġpenn":22429,"Ġaka":22430,"ĠFuk":22431,"Ġintervene":22432,"ĠRefuge":22433,"ĠAlzheimer":22434,"Ġsuccession":22435,"ohan":22436,"does":22437,"Lord":22438,"Ġseparat":22439,"Ġcorrespondence":22440,"Ġshiny":22441,"Prior":22442,"Ġsulf":22443,"Ġmiserable":22444,"Ġdedication":22445,"().":22446,"Ġspecialists":22447,"Ġdefects":22448,"ĠCult":22449,"ĠXia":22450,"Ġjeopard":22451,"ĠOre":22452,"Ability":22453,"Ġlear":22454,"Ġambitions":22455,"ĠBMI":22456,"ĠArabs":22457,"Ġ1942":22458,"Ġpreservation":22459,"ificate":22460,"Ġashamed":22461,"loss":22462,"ĠRestaur":22463,"Ġresemble":22464,"Ġenrich":22465,"ĠKN":22466,"ĠClan":22467,"float":22468,"Ġplayable":22469,"ITT":22470,"Ġharmony":22471,"arrison":22472,"ĠWeinstein":22473,"were":22474,"Ġpoisoning":22475,"ĠComput":22476,"ĠWordPress":22477,"major":22478,"ĠValve":22479,"Fan":22480,"ĠThrow":22481,"ĠRomans":22482,"ĠDepression":22483,"ados":22484,"Ġtortured":22485,"Ġbalancing":22486,"bottom":22487,"Ġacquiring":22488,"ĠMonte":22489,"ardi":22490,"Ġaura":22491,"Ġ##":22492,"ĠStanding":22493,"ĠAtlas":22494,"CF":22495,"Ġintrins":22496,"ĠBenghazi":22497,"Ġcamping":22498,"Ġtapped":22499,"blade":22500,"strous":22501,"ĠRabb":22502,"ĠWritten":22503,"tip":22504,"ĠNeigh":22505,"sterdam":22506,"ĠAllow":22507,"ĠHealing":22508,"ĠRhod":22509,"num":22510,"Ġcaffeine":22511,"ĠPercent":22512,"Ġboo":22513,"Ġapples":22514,"305":22515,"Ġwelcoming":22516,"Ġapplaud":22517,"Ġausterity":22518,"±":22519,"ĠRe
ality":22520,"efe":22521,"å®":22522,"Ġsucks":22523,"Ġtabs":22524,"ĠPayPal":22525,"Ġbackpack":22526,"Ġgifted":22527,"abulary":22528,"ĠScout":22529,"irteen":22530,"Ġchin":22531,"Ġomitted":22532,"Ġnegatively":22533,"Ġaccessing":22534,"ĠEarn":22535,"Ġambulance":22536,"Ġheadphones":22537,"Ġ205":22538,"ĠRefresh":22539,"president":22540,"ĠKitchen":22541,"ĠEntered":22542,"ĠSnyder":22543,"005":22544,"omical":22545,"Ġborrowed":22546,"ĠNem":22547,"Ġaviation":22548,"Ġstall":22549,"rimination":22550,"Ġuniforms":22551,"itime":22552,"ĠSimmons":22553,"energy":22554,"ablished":22555,"yy":22556,"qualified":22557,"Ġrallies":22558,"ĠStuart":22559,"flight":22560,"Ġgangs":22561,"rag":22562,"Ġvault":22563,"lux":22564,"ĠCompar":22565,"Ġdesignation":22566,"209":22567,"ĠJos":22568,"dollar":22569,"zero":22570,"Ġwells":22571,"303":22572,"Ġconstituents":22573,"Ġheck":22574,"Ġcows":22575,"Ġcommanders":22576,"Ġdifferential":22577,"ĠCatherine":22578,"299":22579,"Ġvalve":22580,"Ġbrace":22581,"Ġperspectives":22582,"cert":22583,"fact":22584,"icularly":22585,"ĠMcN":22586,"planes":22587,"Ġintric":22588,"Ġpeas":22589,"ovan":22590,"Ġtossed":22591,"retch":22592,"ĠLopez":22593,"Ġunfamiliar":22594,"death":22595,"ĠApart":22596,"ĠChang":22597,"Ġrelieved":22598,"rophe":22599,"Ġairports":22600,"Ġfreak":22601,"util":22602,"Mill":22603,"ĠChin":22604,"ĠOwen":22605,"male":22606,"ĠBroken":22607,"ĠWinds":22608,"rob":22609,"rising":22610,"Ġfirefighters":22611,"Ġauthoritarian":22612,"Ġ148":22613,"Bitcoin":22614,"external":22615,"Ġbrowsers":22616,"ichever":22617,"orian":22618,"Ġunb":22619,"Ġpoke":22620,"ĠZot":22621,"Mid":22622,"ĠPopular":22623,"Ġcovert":22624,"Ġcontributes":22625,"Ġ650":22626,"Ġcontention":22627,"Gate":22628,"Ġconsoles":22629,"Ġchromos":22630,"ĠIX":22631,"Ġvisually":22632,"ĠEisen":22633,"Ġjewelry":22634,"Ġdelegation":22635,"Ġaccelerate":22636,"ĠRiley":22637,"Ġslope":22638,"Ġindoor":22639,"itially":22640,"Ġhugely":22641,"Ġtunnels":22642,"Ġfined":22643,"Ġdirective":22644,"Ġforehead":22645,"ustomed":22646,"Ġskate":22647,"Music":22648,"gas":22649,"Ġrecognizing":22650,"ambo":22651,"Ġoverweight":22652,"ĠGrade":22653,"ÙĬ":22654,"Ġsounding":22655,"Ġlocking":22656,"ĠREM":22657,"Store":22658,"Ġexcav":22659,"ĠLikewise":22660,"ĠLights":22661,"Ġelbow":22662,"ĠSupply":22663,"wic":22664,"Ġhandsome":22665,"1994":22666,"Coll":22667,"Ġadequately":22668,"ĠAssociate":22669,"Ġstrips":22670,"Ġcrackdown":22671,"Ġmarvel":22672,"ĠKun":22673,"Ġpassages":22674,"@@@@":22675,"ĠTall":22676,"Ġthoughtful":22677,"namese":22678,"Ġprostitution":22679,"business":22680,"Ġballistic":22681,"personal":22682,"cig":22683,"izational":22684,"Round":22685,"ĠÂłĠÂłĠÂłĠÂł":22686,"ĠColeman":22687,"Ġadmitting":22688,"ĠPlug":22689,"Ġbitcoins":22690,"ĠSuz":22691,"Ġfairness":22692,"Ġsupplier":22693,"Ġcatastrophic":22694,"ĠHelen":22695,"oqu":22696,"Marc":22697,"ĠArticles":22698,"gie":22699,"Ġendangered":22700,"Ġdestiny":22701,"ĠVolt":22702,"olia":22703,"axis":22704,"Ġcheat":22705,"Ġunified":22706,"ICO":22707,"quote":22708,"302":22709,"ĠSed":22710,"Ġsuppression":22711,"Ġanalyzing":22712,"Ġsquat":22713,"Ġfiguring":22714,"Ġcoordinates":22715,"Ġchunks":22716,"Ġ1946":22717,"Ġsubp":22718,"Ġwiki":22719,"ĠForbes":22720,"ĠJupiter":22721,"ĠErik":22722,"imer":22723,"ĠCommercial":22724,"\\)":22725,"Ġlegitimacy":22726,"Ġdental":22727,"ĠMean":22728,"Ġdeficits":22729,"550":22730,"Originally":22731,"ĠHorror":22732,"Ġcontamination":22733,"llah":22734,"Ġconfisc":22735,"ĠClare":22736,"TB":22737,"ĠFailed":22738,"aned":22739,"Ġruler":22740,"ĠController":22741,"Ġfeminists":22742,"Fix":22743,"gay":22744
,"207":22745,"Ġrabbit":22746,"Third":22747,"owntown":22748,"Ġglue":22749,"Ġvolatile":22750,"Ġshining":22751,"Ġfoll":22752,"Ġimpaired":22753,"Ġsupers":22754,"æĪ":22755,"Ġclutch":22756,"ļéĨĴ":22757,"Ġprolet":22758,"Ġ(!":22759,"Ġyelled":22760,"ĠKiev":22761,"ĠErn":22762,"ĠShock":22763,"KB":22764,"Ġsituated":22765,"query":22766,"ĠNas":22767,"Ġannex":22768,"character":22769,"ĠHoliday":22770,"Ġautomation":22771,"ĠJill":22772,"ĠRemastered":22773,"Ġlinem":22774,"Ġwilderness":22775,"ĠHorizon":22776,"ĠGuinea":22777,"AZ":22778,"Ġmainland":22779,"Ġsecrecy":22780,"LEASE":22781,"Ġpunk":22782,"ĠProvince":22783,"(),":22784,"Speed":22785,"Ġhanding":22786,"ĠSebast":22787,"Sir":22788,"rase":22789,"Ġjournals":22790,"Ġcongest":22791,"ĠTut":22792,"irrel":22793,"Ġschizophrenia":22794,"Ġmisogyn":22795,"healthy":22796,"Iron":22797,"Ġreacted":22798,"-$":22799,"252":22800,"Ġplural":22801,"Ġplum":22802,"Ġbargain":22803,"Ġgrounded":22804,"finder":22805,"Ġdisse":22806,"ĠLaz":22807,"OOD":22808,"Ġatroc":22809,"Factory":22810,"Ġminions":22811,"Ġori":22812,"ĠBrave":22813,"ĠPRE":22814,"ĠMyanmar":22815,"ĠHod":22816,"Ġexpedition":22817,"Ġexplode":22818,"ĠCoord":22819,"Ġextr":22820,"ĠBrief":22821,"ĠADHD":22822,"Ġhardcore":22823,"feeding":22824,"Ġdile":22825,"ĠFruit":22826,"Ġvaccination":22827,"ĠMao":22828,"osphere":22829,"Ġcontests":22830,"-|":22831,"Ġfren":22832,"isphere":22833,"Rom":22834,"ĠSharp":22835,"ĠTrend":22836,"Ġdisconnect":22837,"âĢ¢âĢ¢":22838,"Ġpersecution":22839,"Earth":22840,"Ġhealthier":22841,"384":22842,"Ġcob":22843,"ĠTrinity":22844,"OWS":22845,"ANN":22846,"Ġspecialty":22847,"Ġgru":22848,"Ġcooperative":22849,"why":22850,"Starting":22851,"ĠIssues":22852,"stre":22853,"ensor":22854,"Ġ185":22855,"Adv":22856,"!?":22857,"ĠRevel":22858,"emia":22859,"ĠHulk":22860,"Ġcelebrations":22861,"ĠSou":22862,"raud":22863,"ĠKlein":22864,"Ġunreal":22865,"context":22866,"Ġpartnerships":22867,"Ġadopting":22868,"tical":22869,"Ġsplash":22870,"ĠHezbollah":22871,"category":22872,"cyclop":22873,"xton":22874,"ĠDot":22875,"urdy":22876,"tz":22877,"Ġenvelope":22878,"ĠNL":22879,"âķ":22880,"Ġwherein":22881,"Spec":22882,"184":22883,"Ġtelev":22884,"aliation":22885,"Ġmyths":22886,"å°":22887,"Ġrigorous":22888,"Ġcommunicating":22889,"Ġobserver":22890,"Ġrehe":22891,"ĠWash":22892,"Ġapologized":22893,"ĠTin":22894,"Ġexpenditures":22895,"workers":22896,"document":22897,"Ġhesitate":22898,"ĠLenin":22899,"Ġunpredictable":22900,"Ġrenewal":22901,"cler":22902,"okia":22903,"ĠCONT":22904,"Ġpostseason":22905,"Tokens":22906,"Ġexacerb":22907,"Ġbetting":22908,"Ġ147":22909,"Ġelevation":22910,"Wood":22911,"ĠSolomon":22912,"194":22913,"004":22914,"output":22915,"Ġredund":22916,"ĠMumbai":22917,"ĠpH":22918,"Ġreproduce":22919,"ĠDuration":22920,"MAX":22921,"Ġbog":22922,"CBS":22923,"ĠBalance":22924,"ĠSgt":22925,"ĠRecent":22926,"Ġcd":22927,"Ġpopped":22928,"Ġincompet":22929,"prop":22930,"ayan":22931,"guy":22932,"Pacific":22933,"Ġtyr":22934,"Ġ{{":22935,"ĠMystic":22936,"ĠDana":22937,"Ġmasturb":22938,"Ġgeometry":22939,"â":22940,"ĠCorrect":22941,"Ġtrajectory":22942,"Ġdistracted":22943,"Ġfoo":22944,"ĠWelsh":22945,"Luc":22946,"mith":22947,"Ġrugby":22948,"Ġrespiratory":22949,"Ġtriangle":22950,"Ġ215":22951,"Ġundergraduate":22952,"ĠSuperior":22953,"changing":22954,"_-":22955,"Ġrightly":22956,"Ġreferee":22957,"Ġlucrative":22958,"Ġunauthorized":22959,"Ġresembles":22960,"ĠGNU":22961,"ĠDerby":22962,"Ġpathways":22963,"ĠLed":22964,"Ġendurance":22965,"Ġstint":22966,"Ġcollector":22967,"Fast":22968,"Ġdots":22969,"Ġnationals":22970,"ĠSecurities":22971,"Ġwhip":22972,"Param":22973,"Ġlearns":22974,"
Magic":22975,"Ġdetailing":22976,"moon":22977,"Ġbroadcasting":22978,"Ġbaked":22979,"265":22980,"holm":22981,"ĠSah":22982,"ĠHussein":22983,"ĠCourtesy":22984,"174":22985,"Ġ146":22986,"Ġgeographic":22987,"peace":22988,"Ġjudging":22989,"ĠStern":22990,"Bur":22991,"Ġstoryline":22992,"Gun":22993,"ĠStick":22994,"245":22995,"307":22996,"ãĤ´ãĥ³":22997,"ĠAdministrator":22998,"Ġburnt":22999,"Ġpave":23000,"choes":23001,"Exec":23002,"Ġcampuses":23003,"Result":23004,"Ġmutations":23005,"ĠCharter":23006,"Ġcaptures":23007,"Ġcompares":23008,"Ġbadge":23009,"Scient":23010,"Ġerad":23011,"iery":23012,"oi":23013,"ettes":23014,"ĠEstate":23015,"Ġstrap":23016,"Ġproudly":23017,"Ġfried":23018,"Ġwithdrawn":23019,"ĠVoy":23020,"phony":23021,"Items":23022,"ĠPierce":23023,"bard":23024,"Ġannotation":23025,"anton":23026,"illon":23027,"Impro":23028,"...)":23029,"Ġhappier":23030,"------":23031,"adjust":23032,"Ġstaffers":23033,"Ġactivism":23034,"Ġperf":23035,"Ġalright":23036,"Need":23037,"Ġcommence":23038,"Ġopioid":23039,"ĠAmanda":23040,"Es":23041,"ĠPars":23042,"ĠKaw":23043,"Works":23044,"248":23045,"Ġindo":23046,"tc":23047,"endant":23048,"ĠMoto":23049,"Ġlegalization":23050,"OTE":23051,"Ġtasked":23052,"Ġtsp":23053,"ĠACTIONS":23054,"166":23055,"Ġrefreshing":23056,"ĠNR":23057,"ĠPerez":23058,"Ġinfringement":23059,"SY":23060,"Listen":23061,"inning":23062,"ku":23063,"Ġrotate":23064,"program":23065,"arah":23066,"Design":23067,"Ġ(£":23068,"Ġstoring":23069,"Ġwarrants":23070,"Ġjudgement":23071,"ĠBrist":23072,"usually":23073,"photo":23074,"ĠRan":23075,"ĠPine":23076,"Ġoutrageous":23077,"ĠValentine":23078,"luence":23079,"ĠEverybody":23080,"Altern":23081,"Ġrelevance":23082,"Ġterminated":23083,"Ġdessert":23084,"Ġfulfilled":23085,"Ġprosecuted":23086,"ĠWords":23087,"Ġmigrant":23088,"Ġcultivation":23089,"ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ":23090,"idelity":23091,"ĠVern":23092,"ĠLogin":23093,"Ġmetaphor":23094,"ĠTip":23095,"Ġrecruits":23096,"ĠPig":23097,"ribing":23098,"Ġenthusiasts":23099,"exper":23100,"Ġfrightening":23101,"ĠHair":23102,"anson":23103,"strate":23104,"Ġhi":23105,"Height":23106,"Ġowning":23107,"none":23108,"Ġdislike":23109,"Ġknives":23110,"pherd":23111,"Ġloudly":23112,"ĠAPIs":23113,"Display":23114,"ĠLac":23115,"ĠUSS":23116,"abl":23117,"verages":23118,"Jew":23119,"Ġ172":23120,"ĠHistorical":23121,"atoon":23122,"ĠPhysics":23123,"intern":23124,"Ġwarmth":23125,"Ġtopp":23126,"DM":23127,"Ġgunman":23128,"Ġemperor":23129,"odi":23130,"ãĥ£":23131,"inatory":23132,"ĠRib":23133,"Ġ131":23134,"ĠSaturn":23135,"ĠShining":23136,"Ġwaking":23137,"Quotes":23138,"Ġcomedian":23139,"enberg":23140,"½":23141,"Ġbelievers":23142,"Ġpaperwork":23143,"custom":23144,"Ġlev":23145,"Ġlament":23146,"Ġpouring":23147,"222":23148,"political":23149,"ĠSupplement":23150,"maid":23151,"Ġcruelty":23152,"Ġtread":23153,"ysics":23154,"Aw":23155,"rites":23156,"Ġmodifier":23157,"ĠPosition":23158,"Adam":23159,"lb":23160,"ubs":23161,"Ġimperfect":23162,"Ġclusters":23163,"ĠEngineer":23164,"ĠCherry":23165,"Ġinauguration":23166,"ĠSau":23167,"Ġembodiment":23168,"ĠUncle":23169,"Ġoverr":23170,"Ġexplosions":23171,"cule":23172,"ĠPrinceton":23173,"ĠAndrea":23174,"Ġincorrectly":23175,"Ġearnest":23176,"Ġpilgr":23177,"ĠSprint":23178,"Ġsleeve":23179,"Ġhears":23180,"ĠAmazing":23181,"Ġbrowsing":23182,"agin":23183,"Ġhomeland":23184,"Ġhaw":23185,"Ġdiving":23186,"istered":23187,"178":23188,"Ġbargaining":23189,"ĠArcade":23190,"Ġdelegate":23191,"terson":23192,"................................................................":23193,"ĠJacksonville":23194,"275":23195,"Ġstagn":23196
,"Ġadam":23197,"ĠSherman":23198,"CB":23199,"Ġsuburb":23200,"ĠFoods":23201,"Ġconverting":23202,"ĠArist":23203,"Ġchambers":23204,"love":23205,"Ġamino":23206,"ĠGan":23207,"Ġmadness":23208,"mc":23209,"ĠUSE":23210,"defined":23211,"Ġultr":23212,"indust":23213,"Ġwolves":23214,"lance":23215,"Additionally":23216,"Ġcracks":23217,"asia":23218,"ĠReason":23219,"ĠPump":23220,"Ġaccidental":23221,"ĠLaser":23222,"ĠRid":23223,"Ġinitialized":23224,"elli":23225,"Ġunnamed":23226,"Ġnoun":23227,"ĠPassed":23228,"Ġhostage":23229,"ĠEthiop":23230,"shirts":23231,"Ġunrel":23232,"ĠEmbassy":23233,"Ġ1941":23234,"Ġatoms":23235,"Ġpurported":23236,"164":23237,"ĠFi":23238,"Ġgallons":23239,"ĠMonica":23240,"Ġpg":23241,"enment":23242,"Ġsorted":23243,"ĠGospel":23244,"Ġheights":23245,"Ġtraced":23246,"Ġundergoing":23247,"Shell":23248,"Ġsacks":23249,"Ġproportions":23250,"Ġhalluc":23251,"Font":23252,"acet":23253,"Ġwarmer":23254,"ĠINTER":23255,"Ġgrabbing":23256,"Plug":23257,"Ġrealization":23258,"ĠBurke":23259,"Ġenchant":23260,"ATER":23261,"ĠSeed":23262,"Ġabundant":23263,"FM":23264,"Ġcivic":23265,"Vs":23266,"isi":23267,"Ġvow":23268,"Ġreper":23269,"ĠPartnership":23270,"Ġpenetration":23271,"Ġaxe":23272,"Ġshattered":23273,"ĠZombies":23274,"Ġvinyl":23275,"ĠAlert":23276,"eon":23277,"Ġobliged":23278,"ĠIllust":23279,"ĠPlaza":23280,"ĠFrontier":23281,"Ġdavidjl":23282,"ĠSerial":23283,"ĠHav":23284,"ĠNutrition":23285,"Bi":23286,"ĠâĸĪ":23287,"ĠJays":23288,"linux":23289,"Ġhurry":23290,"Ġvoy":23291,"Ġhopeless":23292,"ĠStealth":23293,"Ġãģ":23294,"essors":23295,"ttle":23296,"borg":23297,"ĠSafari":23298,"fell":23299,"Ġwary":23300,"due":23301,"ĠAbove":23302,"Ha":23303,"ELL":23304,"Ġnotor":23305,"ĠWon":23306,"Too":23307,"Ġoccupations":23308,"Ġpossessions":23309,"Ġinviting":23310,"Ġpredators":23311,"Ġaccelerated":23312,"Ġ157":23313,"uterte":23314,"ĠCube":23315,"east":23316,"account":23317,"Give":23318,"Ġtransplant":23319,"redients":23320,"idable":23321,"Ġscreenshots":23322,"ĠGund":23323,"ĠFS":23324,"Ġtravelers":23325,"Ġsensory":23326,"ĠFiat":23327,"ĠRockets":23328,"İĭ":23329,"_{":23330,"Friend":23331,"Ġcharming":23332,"ALS":23333,"Ġenjoyment":23334,"mph":23335,"Ġ5000":23336,"ĠREG":23337,"ÙĨ":23338,"bia":23339,"Ġcompilation":23340,"rost":23341,"ĠVP":23342,"ĠSchne":23343,"2019":23344,"Ġcopying":23345,"MORE":23346,"ĠFlore":23347,"falls":23348,"215":23349,"total":23350,"Ġdisciples":23351,"double":23352,"Ġexceeding":23353,"Ġsmashed":23354,"Ġconceptual":23355,"ĠRomania":23356,"ĠBrent":23357,"ĠICE":23358,"ĠTou":23359,"Ġgrap":23360,"Ġnails":23361,"189":23362,"ãĥĺ":23363,"Ġprocure":23364,"eur":23365,"Ġconfirming":23366,"ĠCec":23367,"awi":23368,"ĠEden":23369,"Ġng":23370,"Ġengineered":23371,"atics":23372,"Ġhooked":23373,"Ġdisgusting":23374,"ĠMurder":23375,"ãĤ¿":23376,"Library":23377,"Ġ168":23378,"Almost":23379,"hematic":23380,"Menu":23381,"ĠNotre":23382,"ĠJur":23383,"Ġkidnapped":23384,"Ġhacker":23385,"ĠJade":23386,"Ġcreepy":23387,"Ġdrawings":23388,"ĠSponsor":23389,"Ġcyclists":23390,"ĠGoblin":23391,"Ġoptimized":23392,"Ġstaged":23393,"ĠMcD":23394,"between":23395,"Age":23396,"eno":23397,"Sex":23398,"ĠWide":23399,"nings":23400,"avis":23401,"Ġincapable":23402,"ĠKob":23403,"Ġrewarding":23404,"ĠLone":23405,"olescent":23406,"Ġcontracted":23407,"Ġsticky":23408,"Jose":23409,"Ball":23410,"fest":23411,"ĠInput":23412,"ĠRecently":23413,"Ġtomat":23414,"square":23415,"Application":23416,"Ġnitrogen":23417,"Ġduplicate":23418,"ĠRecon":23419,"ĠDear":23420,"London":23421,"Ġintra":23422,"Ġdock":23423,"Ġoutreach":23424,"ĠMillion":23425,"Ġmammals":23426,"ampton":23427,"VAL":23428,"Ġsnaps":2
3429,"Ġdos":23430,"ĠWhole":23431,"ĠReady":23432,"Try":23433,"ĠWinnipeg":23434,"earance":23435,"Ġincurred":23436,"renched":23437,"ĠNSW":23438,"ilot":23439,"raine":23440,"Ġcube":23441,"got":23442,"Ġrunway":23443,"etermined":23444,"ĠHawks":23445,"Ġsurvivor":23446,"ĠWish":23447,"ĠDin":23448,"ĠDEF":23449,"ĠVault":23450,"187":23451,"Ġmushrooms":23452,"Ġcrisp":23453,"bey":23454,"ĠDiscovery":23455,"Ġdevelopmental":23456,"Ġparadigm":23457,"Ġchaotic":23458,"ĠTsu":23459,"Ġ333":23460,"bons":23461,"Ġbacterial":23462,"Ġcommits":23463,"Ġcosmic":23464,"Ġmega":23465,"ocative":23466,"ĠPaint":23467,"ophobic":23468,"Ġvain":23469,"Ġcarved":23470,"ĠThief":23471,"ĠGul":23472,"owship":23473,"Ġcites":23474,"ĠEdinburgh":23475,"Ġdiminished":23476,"Ġacknowledges":23477,"ĠKills":23478,"Ġmicrow":23479,"ĠHera":23480,"Ġseniors":23481,"Ġwhereby":23482,"Hop":23483,"atron":23484,"Ġunavailable":23485,"ĠNate":23486,"Ġ480":23487,"Ġslated":23488,"ĠRebecca":23489,"ĠBattery":23490,"Ġgrammar":23491,"Ġheadset":23492,"Ġcursor":23493,"Ġexcluding":23494,"anye":23495,"aundering":23496,"ebin":23497,"Ġfeasible":23498,"ĠPublishing":23499,"ĠLabs":23500,"ĠCliff":23501,"ĠFerrari":23502,"Ġpac":23503,"visible":23504,"marked":23505,"pell":23506,"Ġpolite":23507,"Ġstaggering":23508,"ĠGalactic":23509,"Ġsuperst":23510,"Ġparan":23511,"ĠOfficers":23512,"ãĢģ":23513,"Ġspecifics":23514,"ulus":23515,"239":23516,"ĠPaste":23517,"AMP":23518,"ĠPanama":23519,"ĠDelete":23520,"anguard":23521,"restrial":23522,"Ġheroic":23523,"ĠDy":23524,"اÙĦ":23525,"Ġincumbent":23526,"Ġcrunch":23527,"tro":23528,"Ġscoop":23529,"Ġblogger":23530,"Ġsellers":23531,"uren":23532,"Ġmedicines":23533,"ĠCaps":23534,"ĠAnimation":23535,"oxy":23536,"Ġoutward":23537,"Ġinquiries":23538,"229":23539,"Ġpsychologist":23540,"ĠSask":23541,"evil":23542,"Ġcontaminated":23543,"ãĤ¨":23544,"herence":23545,"Ġbranded":23546,"ĠAbdul":23547,"zh":23548,"Ġparagraphs":23549,"Ġmins":23550,"Ġcorrelated":23551,"erb":23552,"Ġimpart":23553,"Ġmilestone":23554,"ĠSolutions":23555,"otle":23556,"Ġundercover":23557,"Ġmarched":23558,"ĠChargers":23559,"fax":23560,"ĠSecrets":23561,"Ġruth":23562,"weather":23563,"Ġfeminine":23564,"Ġsham":23565,"Ġprestigious":23566,"iggins":23567,"Ġsung":23568,"history":23569,"ettle":23570,"ggie":23571,"Ġoutdated":23572,"oland":23573,"Ġperceptions":23574,"ĠSession":23575,"ĠDodgers":23576,"uj":23577,"ĠEND":23578,"Doc":23579,"Ġdeficiency":23580,"Grand":23581,"ĠJoker":23582,"Ġretrospect":23583,"Ġdiagnostic":23584,"Ġharmless":23585,"Ġrogue":23586,"ĠAval":23587,"Equ":23588,"Ġtransc":23589,"ĠRobertson":23590,"ĠDepending":23591,"ĠBurns":23592,"ivo":23593,"Ġhostility":23594,"Features":23595,"ĵĺ":23596,"Ġdiscomfort":23597,"ĠLCD":23598,"specified":23599,"ĠExpect":23600,"340":23601,"Ġimperative":23602,"ĠRegular":23603,"Chinese":23604,"Ġstatewide":23605,"Ġsymm":23606,"Ġloops":23607,"Ġautumn":23608,"Nick":23609,"Ġshaping":23610,"Ġquot":23611,"Ġcherry":23612,"ĠCrossref":23613,"è¦ļéĨĴ":23614,"Standard":23615,"heed":23616,"ĠDell":23617,"ĠVietnamese":23618,"Ġost":23619,"ĠValkyrie":23620,"OA":23621,"Assad":23622,"Ġrebound":23623,"ĠTraffic":23624,"places":23625,"æĺ":23626,"ĠBuc":23627,"172":23628,"Ġshelters":23629,"Ġinsisting":23630,"ĠCertainly":23631,"ĠKenneth":23632,"ĠTCP":23633,"Ġpenal":23634,"ĠReplay":23635,"heard":23636,"Ġdialect":23637,"iza":23638,"ĠFY":23639,"itcher":23640,"ĠDL":23641,"Ġspiral":23642,"Ġquarterbacks":23643,"Ġhull":23644,"Ġgoogle":23645,"Ġtodd":23646,"ĠSterling":23647,"ĠPlate":23648,"Ġspying":23649,"mbol":23650,"ĠRealm":23651,"ĠProced":23652,"ĠCrash":23653,"Ġterminate":23654,"Ġprotesting":2365
5,"Center":23656,"guided":23657,"Ġuncover":23658,"Ġboycott":23659,"Ġrealizes":23660,"sound":23661,"Ġpretending":23662,"ĠVas":23663,"1980":23664,"Ġframed":23665,"Ġ139":23666,"Ġdescended":23667,"Ġrehabilitation":23668,"Ġborrowing":23669,"ĠBuch":23670,"Ġblur":23671,"Ron":23672,"ĠFrozen":23673,"enza":23674,"Chief":23675,"ĠPoor":23676,"Ġtranslates":23677,"MIN":23678,"Ġ212":23679,"JECT":23680,"Ġerupted":23681,"Ġsuccesses":23682,"SEC":23683,"Ġplague":23684,"Ġgems":23685,"doms":23686,"Ġstretches":23687,"ĠSpy":23688,"Ġstorytelling":23689,"Credit":23690,"ĠPush":23691,"Ġtraction":23692,"Ġineffective":23693,"ĠLuna":23694,"Ġtapes":23695,"Ġanalytics":23696,"ercise":23697,"Ġprogrammes":23698,"ĠCarbon":23699,"Ġbehold":23700,"heavy":23701,"ĠConservation":23702,"ĠFIR":23703,"Ġsack":23704,"termin":23705,"ricks":23706,"Ġhoused":23707,"Ġunusually":23708,"Ice":23709,"Ġexecuting":23710,"ĠMoroc":23711,"eday":23712,"Ġeditions":23713,"Ġsmarter":23714,"ĠBA":23715,"Ġoutlaw":23716,"Ġvanished":23717,"iba":23718,"ALSE":23719,"ĠSilva":23720,"238":23721,"Could":23722,"Ġphilosopher":23723,"Ġevacuated":23724,"Secret":23725,"142":23726,"Ġvisas":23727,"ãĤ¬":23728,"ĠMalt":23729,"ĠClearly":23730,"ĠNiger":23731,"ĠCairo":23732,"ĠFist":23733,"380":23734,"ĠXML":23735,"auto":23736,"itant":23737,"Ġreinforced":23738,"Record":23739,"ĠSurvivor":23740,"GHz":23741,"Ġscrews":23742,"parents":23743,"Ġoceans":23744,"mares":23745,"Ġbrakes":23746,"vasive":23747,"Ġhello":23748,"ĠSIM":23749,"rimp":23750,"Ġore":23751,"ĠArmour":23752,"247":23753,"Ġterrific":23754,"Ġtones":23755,"141":23756,"ĠMinutes":23757,"Episode":23758,"Ġcurves":23759,"Ġinflammatory":23760,"Ġbatting":23761,"ĠBeautiful":23762,"Lay":23763,"Ġunpop":23764,"vable":23765,"Ġriots":23766,"ĠTactics":23767,"baugh":23768,"ĠCock":23769,"Ġorgasm":23770,"ĠSas":23771,"Ġconstructor":23772,"etz":23773,"Gov":23774,"Ġantagon":23775,"Ġtheat":23776,"Ġdeeds":23777,"hao":23778,"cuts":23779,"ĠMcCl":23780,"Ġum":23781,"ĠScientists":23782,"Ġgrassroots":23783,"yssey":23784,"\"]=>":23785,"Ġsurfaced":23786,"Ġshades":23787,"Ġneighbours":23788,"Ġadvertis":23789,"oya":23790,"Ġmerged":23791,"Upon":23792,"Ġgad":23793,"Ġanticipate":23794,"Anyway":23795,"Ġslogan":23796,"Ġdisrespect":23797,"Iran":23798,"ĠTB":23799,"acted":23800,"Ġsubpoen":23801,"mediately":23802,"OOOO":23803,"Ġwaiver":23804,"Ġvulnerabilities":23805,"ottesville":23806,"ĠHuffington":23807,"Josh":23808,"ĠDH":23809,"Monday":23810,"ĠEllen":23811,"Know":23812,"xon":23813,"items":23814,"228":23815,"Ġfills":23816,"ĠNike":23817,"Ġcumulative":23818,"andals":23819,"Ir":23820,"Ġì":23821,"Ġfriction":23822,"igator":23823,"Ġscans":23824,"ĠVienna":23825,"ldom":23826,"Ġperformers":23827,"Prim":23828,"Ġbidding":23829,"Mur":23830,"Ġleaned":23831,"ĠPrix":23832,"alks":23833,"Ġ[âĢ¦]":23834,"ĠTwitch":23835,"ĠDeveloper":23836,"ĠGir":23837,"Ġcallback":23838,"Abstract":23839,"Ġaccustomed":23840,"Ġfreedoms":23841,"ĠPG":23842,"uracy":23843,"Ġlump":23844,"isman":23845,",,,,":23846,"1992":23847,"ĠRED":23848,"Ġworm":23849,"Match":23850,"ĠPlatinum":23851,"IJ":23852,"ĠOwner":23853,"Trivia":23854,"compl":23855,"Ġnewborn":23856,"Ġfantas":23857,"Own":23858,"Ġ1959":23859,"Ġsympath":23860,"Ġubiqu":23861,"Ġoutputs":23862,"Ġallev":23863,"Ġprag":23864,"Kevin":23865,"Ġfavors":23866,"Ġburial":23867,"Ġnurt":23868,"solete":23869,"cache":23870,"Ġ156":23871,"Ġunlocks":23872,"techn":23873,"Making":23874,"Ġconquer":23875,"adic":23876,"æĸ":23877,"Ġelf":23878,"Ġelectorate":23879,"ĠKurds":23880,"ĠStack":23881,"ĠSamurai":23882,"Ġâĺħ":23883,"Ġ{}":23884,"ĠSaid":23885,"ĠFallout":23886,"Ġkindness":23887,"Ġ
Customs":23888,"ĠBoulevard":23889,"Ġhelicopters":23890,"otics":23891,"ĠVeget":23892,"comment":23893,"Ġcriticised":23894,"Ġpolished":23895,"ĠRemix":23896,"ĠCultural":23897,"Ġrecons":23898,"Ġdoi":23899,"atem":23900,"Screen":23901,"Ġbarred":23902,"Comments":23903,"ĠGenerally":23904,"Ġslap":23905,"720":23906,"Vari":23907,"pine":23908,"Ġempt":23909,"Ġhats":23910,"ĠPlaying":23911,"lab":23912,"average":23913,"forms":23914,"ĠCotton":23915,"Ġcans":23916,"ĠDON":23917,"ĠSomalia":23918,"Crypt":23919,"ĠIncreases":23920,"Ever":23921,"modern":23922,"Ġsurgeon":23923,"3000":23924,"Ġrandomized":23925,"================================================================":23926,"Bern":23927,"impl":23928,"ĠCOR":23929,"Ġproclaim":23930,"thouse":23931,"Ġtoes":23932,"Ġample":23933,"Ġpreserving":23934,"Ġdisbel":23935,"grand":23936,"Besides":23937,"Ġsilk":23938,"ĠPattern":23939,"hm":23940,"Ġenterprises":23941,"Ġaffidavit":23942,"ĠAdvisory":23943,"Ġadvertised":23944,"ĠReligious":23945,"sections":23946,"psych":23947,"ĠFields":23948,"aways":23949,"Ġhashtag":23950,"ĠNightmare":23951,"Ġvampire":23952,"Ġforensic":23953,"rossover":23954,"nar":23955,"Ġnavy":23956,"Ġvacant":23957,"ĠDuel":23958,"Ġhallway":23959,"Ġfacebook":23960,"identally":23961,"ĠNRA":23962,"Ġmatt":23963,"Ġhurricane":23964,"ĠKirby":23965,"ĠPuzzle":23966,"Ġskirt":23967,"oust":23968,"dullah":23969,"Ġanalogy":23970,"inion":23971,"Ġtomatoes":23972,"ĠNV":23973,"ĠPeak":23974,"ĠMeyer":23975,"Ġappointments":23976,"Ġmasc":23977,"Ġalley":23978,"rehend":23979,"Ġcharities":23980,"Ġundo":23981,"Ġdestinations":23982,"ĠTesting":23983,"\">\"":24618,"cats":24619,"*.":24620,"Ġgestures":24621,"general":24622,"League":24623,"Ġpackets":24624,"ĠInspector":24625,"ĠBerg":24626,"Ġfraudulent":24627,"Ġcriticize":24628,"Fun":24629,"Ġblaming":24630,"ndra":24631,"Ġslash":24632,"ĠEston":24633,"Ġproposing":24634,"Ġwhales":24635,"Ġtherapist":24636,"Ġsubset":24637,"Ġleisure":24638,"ELD":24639,"ĠCVE":24640,"ĠActivity":24641,"Ġculmin":24642,"shop":24643,"ĠDAY":24644,"ischer":24645,"ĠAdmiral":24646,"ĠAttacks":24647,"Ġ1958":24648,"Ġmemoir":24649,"Ġfolded":24650,"Ġsexist":24651,"Ġ153":24652,"ĠLI":24653,"Ġreadings":24654,"Ġembarrassment":24655,"ĠEmployment":24656,"wart":24657,"chin":24658,"Ġcontinuation":24659,"lia":24660,"Recently":24661,"Ġduel":24662,"Ġevacuation":24663,"ĠKashmir":24664,"Ġdisposition":24665,"ĠRig":24666,"Ġbolts":24667,"Ġinsurers":24668,"467":24669,"Mex":24670,"Ġretaliation":24671,"Ġmisery":24672,"Ġunreasonable":24673,"raining":24674,"Imm":24675,"ĠPU":24676,"emer":24677,"Ġgenital":24678,"ãĤ³":24679,"ĠCandy":24680,"Ġonions":24681,"ĠPatt":24682,"liner":24683,"Ġconceded":24684,"Ġfa":24685,"Ġforc":24686,"ĠHernandez":24687,"ĠGeoff":24688,"debian":24689,"ĠTeams":24690,"Ġcries":24691,"Ġhomeowners":24692,"237":24693,"ABC":24694,"Ġstitch":24695,"Ġstatistic":24696,"Ġheaders":24697,"ĠBiology":24698,"Ġmotors":24699,"ĠGEN":24700,"ĠLip":24701,"Ġhates":24702,"Ġheel":24703,"Self":24704,"ipl":24705,"EDIT":24706,"orting":24707,"Ġannot":24708,"ĠSpeech":24709,"oldemort":24710,"ĠJavascript":24711,"ĠLeBron":24712,"Ġfootprint":24713,"Ġfn":24714,"Ġseizures":24715,"nas":24716,"hide":24717,"Ġ1954":24718,"ĠBee":24719,"ĠDeclaration":24720,"ĠKatie":24721,"Ġreservations":24722,"NR":24723,"female":24724,"Ġsaturated":24725,"Ġbiblical":24726,"Ġtrolls":24727,"Device":24728,"photos":24729,"Ġdrums":24730,"ãĥīãĥ©ãĤ´ãĥ³":24731,"Night":24732,"fighter":24733,"ĠHak":24734,"riber":24735,"Ġcush":24736,"Ġdisciplinary":24737,"baum":24738,"ĠGH":24739,"ĠSchmidt":24740,"ilibrium":24741,"Ġsixty":24742,"ĠKushner":24743,"rots":24744
,"Ġpund":24745,"ĠRac":24746,"Ġsprings":24747,"Ġconve":24748,"Business":24749,"Fall":24750,"Ġqualifications":24751,"Ġverses":24752,"Ġnarciss":24753,"ĠKoh":24754,"ĠWow":24755,"ĠCharlottesville":24756,"edo":24757,"Ġinterrogation":24758,"ĠWool":24759,"365":24760,"Brian":24761,"Ġâľĵ":24762,"Ġalleges":24763,"onds":24764,"idation":24765,"ĠJackie":24766,"yu":24767,"Ġlakes":24768,"Ġworthwhile":24769,"Ġcrystals":24770,"ĠJuda":24771,"Ġcomprehend":24772,"Ġflush":24773,"Ġabsorption":24774,"ĠOC":24775,"Ġfrightened":24776,"ĠChocolate":24777,"Martin":24778,"Ġbuys":24779,"Ġbucks":24780,"Ġappell":24781,"ĠChampionships":24782,"Ġlistener":24783,"ĠDefensive":24784,"Ġcz":24785,"uds":24786,"ĠMate":24787,"Ġreplay":24788,"Ġdecorated":24789,"Ġsunk":24790,"ĠVIP":24791,"ĠAnk":24792,"Ġ195":24793,"aaaa":24794,"Nobody":24795,"ĠMilk":24796,"ĠGur":24797,"ĠMk":24798,"ĠSara":24799,"Ġseating":24800,"ĠWid":24801,"Track":24802,"Ġemploys":24803,"Ġgigantic":24804,"APP":24805,"ãĤ§":24806,"inventory":24807,"Ġtowel":24808,"atche":24809,"lasting":24810,"ĠTL":24811,"Ġlatency":24812,"Ġkne":24813,"Ber":24814,"meaning":24815,"Ġupheld":24816,"Ġplayground":24817,"Ġmant":24818,"Side":24819,"Ġstereo":24820,"Ġnorthwest":24821,"Ġexceptionally":24822,"Ġrays":24823,"Ġrecurring":24824,"Drive":24825,"Ġupright":24826,"Ġabduct":24827,"ĠMarathon":24828,"Ġgoodbye":24829,"Ġalphabet":24830,"hp":24831,"Ġcourtroom":24832,"rington":24833,"othing":24834,"Tag":24835,"Ġdiplomats":24836,"Ġbarbar":24837,"ĠAqua":24838,"183":24839,"3333":24840,"Ġmaturity":24841,"Ġinstability":24842,"ĠApache":24843,"Ġ===":24844,"Ġfasting":24845,"ĠGrid":24846,"ModLoader":24847,"Ġ152":24848,"Abs":24849,"ĠOperating":24850,"etti":24851,"Ġacquaint":24852,"Donnell":24853,"ĠKem":24854,"ĠForge":24855,"Ġarmored":24856,"Mil":24857,"Ġphilosophers":24858,"invest":24859,"Players":24860,"âĪ":24861,"Ġmyriad":24862,"Ġcomrades":24863,"Rot":24864,"Ġremembering":24865,"Ġcorresponds":24866,"Ġprogrammers":24867,"ĠLynn":24868,"Ġolig":24869,"Ġcoherent":24870,"ynchron":24871,"ĠChemical":24872,"Ġjugg":24873,"pair":24874,"posts":24875,"Eye":24876,"ĠInner":24877,"Ġsemester":24878,"ottest":24879,"ĠEmirates":24880,"ricanes":24881,"orously":24882,"mits":24883,"ĠWis":24884,"Ġdodge":24885,"location":24886,"Ġfaded":24887,"Amazon":24888,"ĠProceed":24889,"ĠINFO":24890,"journal":24891,"ĠTruck":24892,"Ten":24893,"Ġ217":24894,"Ġstatutes":24895,"mobile":24896,"ĠTypes":24897,"Recomm":24898,"buster":24899,"pex":24900,"Ġlegends":24901,"Ġheadache":24902,"faced":24903,"ĠWiFi":24904,"ifty":24905,"ĠHER":24906,"Ġcircuits":24907,"ERROR":24908,"226":24909,"olin":24910,"Ġcylinder":24911,"ospace":24912,"ikers":24913,"Prem":24914,"Quant":24915,"Ġconflicting":24916,"Ġslightest":24917,"Ġforged":24918,"ionage":24919,"Stephen":24920,"ĠKub":24921,"ĠOpportun":24922,"ĠHeal":24923,"Ġblo":24924,"Ġrulers":24925,"Ġhuh":24926,"Ġsubmarine":24927,"fy":24928,"asser":24929,"Ġallowance":24930,"ĠKasich":24931,"ĠTas":24932,"ĠAustralians":24933,"ForgeModLoader":24934,"ĠâĨij":24935,"ĠMatrix":24936,"amins":24937,"Ġ1200":24938,"ĠAcqu":24939,"236":24940,"Document":24941,"ĠBreaking":24942,"193":24943,"ĠSubst":24944,"ĠRoller":24945,"ĠProperties":24946,"ĠNI":24947,"tier":24948,"Ġcrushing":24949,"Ġadvocating":24950,"Furthermore":24951,"keepers":24952,"Ġsexism":24953,"xd":24954,"Ġcaller":24955,"ĠSense":24956,"chieve":24957,"ĠTF":24958,"Ġfueled":24959,"Ġreminiscent":24960,"Ġobsess":24961,"urst":24962,"Ġuphold":24963,"ĠFans":24964,"hetics":24965,"ĠâĹ":24966,"ĠBath":24967,"Ġbeverage":24968,"Ġoscill":24969,"254":24970,"Ġpoles":24971,"Ġgradual":24972,"Ġexting":2497
3,"ĠSuff":24974,"ĠSuddenly":24975,"Ġliking":24976,"Ġ1949":24977,"unciation":24978,"amination":24979,"ĠOmar":24980,"ĠLV":24981,"ĠConsequently":24982,"Ġsynthes":24983,"ĠGIF":24984,"Ġpains":24985,"Ġinteracting":24986,"uously":24987,"incre":24988,"Ġrumor":24989,"ĠScientology":24990,"197":24991,"ĠZig":24992,"Ġspelling":24993,"ĠASS":24994,"Ġextingu":24995,"mson":24996,"Ġgh":24997,"Ġremarked":24998,"ĠStrategic":24999,"ĠMON":25000,"å¥":25001,"gae":25002,"ĠWHAT":25003,"Eric":25004,"ĠCampus":25005,"Ġmethane":25006,"Ġimagin":25007,"JUST":25008,"ĠAlm":25009,"XT":25010,"iq":25011,"ĠRSS":25012,"Ġwrongdoing":25013,"atta":25014,"Ġbigot":25015,"Ġdemonstrators":25016,"ĠCalvin":25017,"ĠVilla":25018,"Ġmembrane":25019,"ĠAwesome":25020,"Ġbenefic":25021,"268":25022,"Ġmagnificent":25023,"ĠLots":25024,"Greg":25025,"ĠBoris":25026,"Ġdetainees":25027,"ĠHerman":25028,"Ġwhispered":25029,"Ġawe":25030,"Professor":25031,"funding":25032,"Ġphysiological":25033,"ĠDestruction":25034,"Ġlimb":25035,"Ġmanipulated":25036,"Ġbubbles":25037,"Ġpseud":25038,"Ġhydra":25039,"ĠBristol":25040,"Ġstellar":25041,"ĠExpansion":25042,"ĠKell":25043,"ĠInterestingly":25044,"Ġmans":25045,"Ġdragging":25046,"Ġecological":25047,"ĠFit":25048,"Ġgent":25049,"Ġbenefited":25050,"ĠHaiti":25051,"Ġpolyg":25052,"ãĥİ":25053,"Ġ2030":25054,"Ġprow":25055,"Ġreconstruction":25056,"Ġwast":25057,"Ġpsychic":25058,"ĠGreeks":25059,"Handler":25060,"162":25061,"ĠPulse":25062,"Ġsolicit":25063,"Ġsys":25064,"Ġinflux":25065,"ĠGentle":25066,"percent":25067,"Ġproliferation":25068,"Ġtaxable":25069,"Ġdisregard":25070,"Ġescaping":25071,"Ġginger":25072,"Ġwithstand":25073,"Ġdevastated":25074,"ĠDew":25075,"series":25076,"Ġinjected":25077,"elaide":25078,"Ġturnover":25079,"heat":25080,"ĻĤ":25081,"Happy":25082,"ĠSilent":25083,"ãĤŃ":25084,"ivism":25085,"Ġirrational":25086,"AMA":25087,"Ġreef":25088,"rub":25089,"Ġ162":25090,"Ġbankers":25091,"ĠEthics":25092,"vv":25093,"Ġcriticisms":25094,"Kn":25095,"186":25096,"Movie":25097,"ĠTories":25098,"Ġnood":25099,"Ġdistortion":25100,"False":25101,"odore":25102,"Ġtasty":25103,"Research":25104,"ĠUID":25105,"-)":25106,"Ġdivorced":25107,"ĠMU":25108,"ĠHayes":25109,"ĠIsn":25110,"iani":25111,"ĠHQ":25112,"Ġ\"#":25113,"ignant":25114,"Ġtraumatic":25115,"ĠLing":25116,"Hun":25117,"Ġsabot":25118,"online":25119,"random":25120,"Ġrenamed":25121,"rared":25122,"KA":25123,"dead":25124,"ét":25125,"ĠAssistance":25126,"Ġseaf":25127,"++++++++":25128,"Ġseldom":25129,"ĠWebb":25130,"Ġboolean":25131,"ulet":25132,"Ġrefrain":25133,"ĠDIY":25134,"rule":25135,"Ġshutting":25136,"Ġutilizing":25137,"loading":25138,"ĠParam":25139,"coal":25140,"ooter":25141,"Ġattracting":25142,"ĠDol":25143,"Ġhers":25144,"agnetic":25145,"ĠReach":25146,"imo":25147,"Ġdiscarded":25148,"ĠPip":25149,"015":25150,"ür":25151,"Ġmug":25152,"Imagine":25153,"COL":25154,"Ġcursed":25155,"ĠShows":25156,"ĠCurtis":25157,"ĠSachs":25158,"speaking":25159,"ĠVista":25160,"ĠFramework":25161,"ongo":25162,"Ġsubreddit":25163,"Ġcrus":25164,"ĠOval":25165,"Row":25166,"growing":25167,"Ġinstallment":25168,"Ġglac":25169,"ĠAdvance":25170,"ECK":25171,"ĠLGBTQ":25172,"LEY":25173,"Ġacet":25174,"Ġsuccessive":25175,"ĠNicole":25176,"Ġ1957":25177,"Quote":25178,"Ġcircumstance":25179,"ackets":25180,"Ġ142":25181,"ortium":25182,"Ġguessed":25183,"ĠFrame":25184,"Ġperpetrators":25185,"ĠAviation":25186,"ĠBench":25187,"Ġhandc":25188,"Ap":25189,"Ġ1956":25190,"259":25191,"rand":25192,"NetMessage":25193,"din":25194,"urtles":25195,"hig":25196,"ĠVIII":25197,"ffiti":25198,"ĠSwords":25199,"bial":25200,"Ġkidnapping":25201,"device":25202,"Ġbarn":25203,"ĠEli":25204
,"aucas":25205,"Send":25206,"Constructed":25207,"Ġ½":25208,"Ġneedles":25209,"Ġadvertisements":25210,"Ġvou":25211,"Ġexhibited":25212,"ĠFortress":25213,"Ask":25214,"Berry":25215,"TYPE":25216,"Ġcancers":25217,"umping":25218,"ĠTerritory":25219,"Ġprud":25220,"Ġnas":25221,"Ġatheist":25222,"Ġbalances":25223,"ãģŁ":25224,"ĠShawn":25225,"&&":25226,"Ġlandsc":25227,"ĠRGB":25228,"Ġpetty":25229,"Ġexcellence":25230,"Ġtranslations":25231,"Ġparcel":25232,"ĠChev":25233,"East":25234,"ĠOutput":25235,"imi":25236,"Ġambient":25237,"ĠThreat":25238,"Ġvillains":25239,"Ġ550":25240,"ICA":25241,"Ġtaller":25242,"Ġleaking":25243,"cup":25244,"Ġpolish":25245,"Ġinfectious":25246,"ĠKC":25247,"Ġ@@":25248,"background":25249,"Ġbureaucracy":25250,"ĠSai":25251,"unless":25252,"itious":25253,"ĠSkype":25254,"Atl":25255,"IDENT":25256,"008":25257,"Ġhypocr":25258,"Ġpitchers":25259,"Ġguessing":25260,"ĠFINAL":25261,"Between":25262,"Ġvillagers":25263,"Ġ252":25264,"fashion":25265,"ĠTunis":25266,"Beh":25267,"ĠExc":25268,"ĠMID":25269,"288":25270,"ĠHaskell":25271,"196":25272,"ĠNOR":25273,"Ġspecs":25274,"Ġinvari":25275,"Ġglut":25276,"ĠCars":25277,"Ġimpulse":25278,"Ġhonors":25279,"gel":25280,"Ġjurisdictions":25281,"ĠBundle":25282,"ulas":25283,"California":25284,"ĠIncrease":25285,"Ġpear":25286,"Ġsingles":25287,"Ġcues":25288,"Ġunderwent":25289,"ĠWS":25290,"Ġexaggerated":25291,"Ġdubious":25292,"Ġflashing":25293,"LOG":25294,")].":25295,"Journal":25296,"tg":25297,"Van":25298,"ĠIstanbul":25299,"ĠInsp":25300,"ĠFranken":25301,"Draw":25302,"Ġsadness":25303,"Ġironic":25304,"ĠFry":25305,"xc":25306,"Ġ164":25307,"isch":25308,"Way":25309,"ĠProtestant":25310,"horn":25311,"Ġunaff":25312,"ĠViv":25313,"illas":25314,"ĠProductions":25315,"ĠHogan":25316,"Ġperimeter":25317,"ĠSisters":25318,"Ġspontaneous":25319,"Ġdownside":25320,"Ġdescendants":25321,"Ġorn":25322,"worm":25323,"Japanese":25324,"Ġ1955":25325,"Ġ151":25326,"ĠDoing":25327,"elsen":25328,"umbles":25329,"Ġradically":25330,"ĠDrum":25331,"ĠBach":25332,"Ġliabilities":25333,"ĠOB":25334,"ĠElementary":25335,"Ġmeme":25336,"ynes":25337,"Ġfingerprint":25338,"ĠGrab":25339,"Ġundertake":25340,"Members":25341,"ĠReader":25342,"ĠSims":25343,"god":25344,"Ġhypothetical":25345,"scient":25346,"ĠAJ":25347,"Ġcharism":25348,"Ġadmissions":25349,"ĠMissile":25350,"trade":25351,"Ġexercising":25352,"ĠBackground":25353,"Written":25354,"Ġvocals":25355,"whether":25356,"Ġvi":25357,"ĠWinner":25358,"Ġlitter":25359,"ĠShooting":25360,"STEM":25361,"ãĤ¡":25362,"ĠAFL":25363,"Ġvariability":25364,"Ġeats":25365,"ĠDPS":25366,"brow":25367,"Ġelephants":25368,"Ġstrat":25369,"ĠÅ":25370,"Ġsettlers":25371,"Matthew":25372,"Ġinadvert":25373,"HI":25374,"ĠIMF":25375,"ĠGoal":25376,"Ġnerves":25377,"Johnson":25378,"eye":25379,"ablishment":25380,"Thursday":25381,"BILITY":25382,"Had":25383,"amoto":25384,"hetamine":25385,"eps":25386,"Ġmitochond":25387,"Ġcompressed":25388,"ĠTrevor":25389,"ĠAnimals":25390,"Tool":25391,"Lock":25392,"Ġtweak":25393,"Ġpinch":25394,"Ġcancellation":25395,"Pot":25396,"Ġfocal":25397,"ĠAstron":25398,"173":25399,"ĠASC":25400,"ĠOTHER":25401,"umni":25402,"Ġdemise":25403,"dl":25404,"Ùħ":25405,"Semitism":25406,"Ġcracking":25407,"Ġcollaborative":25408,"Ġexplores":25409,"sql":25410,"Ġherbs":25411,"Ġconfigurations":25412,"mis":25413,"ĠResult":25414,"acey":25415,"ĠSmoke":25416,"Ġsanct":25417,"elia":25418,"Ġdegener":25419,"Ġdeepest":25420,"Ġscreamed":25421,"Ġnap":25422,"Software":25423,"ĠSTAR":25424,"EF":25425,"ĠXin":25426,"sponsored":25427,"manship":25428,"233":25429,"Ġprimaries":25430,"Ġfiltering":25431,"Ġassemble":25432,"mil":25433,"ĠMyers":25434,"bo
ws":25435,"Ġpunched":25436,"Mic":25437,"Ġinnovations":25438,"Ġfunc":25439,"ando":25440,"Ġfracking":25441,"ĠVul":25442,"оÐ":25443,"oshop":25444,"ĠImmun":25445,"Ġsettling":25446,"Ġadolescents":25447,"Ġrebuilding":25448,"Ġtransforming":25449,"Ġparole":25450,"Ġharbor":25451,"Ġbooking":25452,"otional":25453,"ongevity":25454,"ĠYo":25455,"bug":25456,"Ġemerges":25457,"ĠMethods":25458,"ĠChu":25459,"Pres":25460,"ĠDungeons":25461,"Ġtrailing":25462,"ĠRum":25463,"ĠHugh":25464,"天":25465,"ĠEra":25466,"ĠBattles":25467,"Results":25468,"ĠTrading":25469,"Ġversa":25470,"css":25471,"axies":25472,"heet":25473,"Ġgreed":25474,"1989":25475,"Ġgardens":25476,"Ġcontingent":25477,"Park":25478,"ĠLeafs":25479,"hook":25480,"robe":25481,"Ġdiplomacy":25482,"ĠFuel":25483,"ĠInvasion":25484,"Ġupgrading":25485,"Male":25486,"Ġelic":25487,"Ġrelentless":25488,"ĠCovenant":25489,"apesh":25490,"ĠTrop":25491,"Ty":25492,"production":25493,"arty":25494,"Ġpunches":25495,"ako":25496,"cyclopedia":25497,"ĠRabbit":25498,"ĠHDMI":25499,"Ġ141":25500,"Ġfoil":25501,"ItemImage":25502,"ĠFG":25503,"Ġimplementations":25504,"ĠPom":25505,"ixtures":25506,"Ġawait":25507,"Ġ330":25508,"amus":25509,"Ġumbrella":25510,"Ġforesee":25511,"separ":25512,"Ġcircumcision":25513,"Ġperipheral":25514,"Say":25515,"ĠExpert":25516,"Inc":25517,"Ġwithdrew":25518,"ĠAnders":25519,"fried":25520,"Ġradioactive":25521,"ĠOpening":25522,"Ġboarding":25523,"ĠND":25524,"Ġoverthrow":25525,"Activ":25526,"WP":25527,"ĠActs":25528,"×Ļ":25529,"Ġmotions":25530,"vic":25531,"ĠMighty":25532,"ĠDefender":25533,"aer":25534,"Ġthankful":25535,"ĠKilling":25536,"ĠBris":25537,"moil":25538,"Ġpredicting":25539,"266":25540,"choice":25541,"Ġkillers":25542,"Ġincub":25543,"ĠChest":25544,"athering":25545,"Ġproclaimed":25546,"flower":25547,"ossom":25548,"umbledore":25549,"ĠCycling":25550,"ĠOccupy":25551,"AGES":25552,"Pen":25553,"ĠYug":25554,"Ġpackaged":25555,"Ġheightened":25556,"cot":25557,"stack":25558,"Cond":25559,"Ġstamps":25560,"mage":25561,"Ġpersuaded":25562,"Ġensl":25563,"ĠCardinal":25564,"Ġsolitary":25565,"Ġpossessing":25566,"ĠCork":25567,"Ġevid":25568,"ĠTay":25569,"Ġblues":25570,"Ġextremism":25571,"Ġlunar":25572,"Ġclown":25573,"Techn":25574,"Ġfestivals":25575,"ĠPvP":25576,"ĠLar":25577,"Ġconsequently":25578,"present":25579,"Ġsomeday":25580,"çİĭ":25581,"ĠMeteor":25582,"Ġtouring":25583,"culture":25584,"Ġbeaches":25585,"Ship":25586,"cause":25587,"ĠFlood":25588,"ãĥ¯":25589,"Ġpurity":25590,"those":25591,"Ġemission":25592,"bolt":25593,"Ġchord":25594,"ĠScripture":25595,"Lu":25596,"Ġ${":25597,"created":25598,"Others":25599,"258":25600,"Ġelemental":25601,"Ġannoyed":25602,"ĠAE":25603,"dan":25604,"ĠSag":25605,"Researchers":25606,"Ġfairy":25607,"âĢĵâĢĵ":25608,"============":25609,"Smart":25610,"GGGG":25611,"Ġskeletons":25612,"Ġpupils":25613,"linked":25614,"Ġurgency":25615,"enabled":25616,"ĠFuck":25617,"Ġcouncill":25618,"rab":25619,"UAL":25620,"TI":25621,"Ġlifes":25622,"Ġconfessed":25623,"Bug":25624,"Ġharmon":25625,"ĠCONFIG":25626,"ĠNeutral":25627,"Double":25628,"Ġstaple":25629,"ĠSHA":25630,"British":25631,"ĠSNP":25632,"ATOR":25633,"oco":25634,"Ġswinging":25635,"gex":25636,"oleon":25637,"plain":25638,"ĠMissing":25639,"ĠTrophy":25640,"vari":25641,"ranch":25642,"Ġ301":25643,"440":25644,"0000000000000000":25645,"Ġrestoring":25646,"Ġhaul":25647,"ucing":25648,"nerg":25649,"Ġfutures":25650,"Ġstrategist":25651,"question":25652,"Ġlateral":25653,"ĠBard":25654,"Ġsor":25655,"ĠRhodes":25656,"ĠDowntown":25657,"?????-":25658,"ĠLit":25659,"ĠBened":25660,"Ġcoil":25661,"street":25662,"ĠPortal":25663,"FILE":25664,"ĠGru":25665,"*,":256
66,"231":25667,"neum":25668,"Ġsucked":25669,"Ġrapper":25670,"Ġtendencies":25671,"ĠLauren":25672,"cellaneous":25673,"267":25674,"Ġbrowse":25675,"Ġoverc":25676,"header":25677,"oise":25678,"Ġbeet":25679,"ĠGle":25680,"Stay":25681,"Ġmum":25682,"Ġtyped":25683,"Ġdiscounts":25684,"Talk":25685,"ĠOg":25686,"existing":25687,"ĠSell":25688,"uph":25689,"CI":25690,"ĠAustrian":25691,"ĠWarm":25692,"Ġdismissal":25693,"Ġaverages":25694,"camera":25695,"Ġallegiance":25696,"LAN":25697,"=\"#":25698,"Ġcommentators":25699,"ĠSetting":25700,"ĠMidwest":25701,"Ġpharmac":25702,"ĠEXP":25703,"Ġstainless":25704,"Chicago":25705,"Ġtan":25706,"244":25707,"Ġcountryside":25708,"ĠVac":25709,"295":25710,"Ġpinned":25711,"Ġcrises":25712,"Ġstandardized":25713,"Task":25714,"ĠJail":25715,"ĠDocker":25716,"colored":25717,"forth":25718,"\"},":25719,"Ġpatrons":25720,"Ġspice":25721,"Ġmourn":25722,"ĠMood":25723,"Ġlaundry":25724,"Ġequip":25725,"ĠMole":25726,"yll":25727,"ĠTHC":25728,"nation":25729,"ĠSherlock":25730,"Ġissu":25731,"ĠKre":25732,"ĠAmericas":25733,"ĠAAA":25734,"Ġsystematically":25735,"Ġcontra":25736,"ĠSally":25737,"Ġrationale":25738,"Ġcarriage":25739,"Ġpeaks":25740,"Ġcontradiction":25741,"ensation":25742,"ĠFailure":25743,"Ġprops":25744,"Ġnamespace":25745,"Ġcove":25746,"fields":25747,"ãĤĭ":25748,"Ġwool":25749,"ĠCatch":25750,"Ġpresumed":25751,"ĠDiana":25752,"ragon":25753,"igi":25754,"Ġhamm":25755,"Ġstunt":25756,"ĠGUI":25757,"ĠObservatory":25758,"ĠShore":25759,"Ġsmells":25760,"annah":25761,"Ġcockpit":25762,"ĠDuterte":25763,"850":25764,"Ġoppressed":25765,"breaker":25766,"ĠContribut":25767,"ĠPeru":25768,"ĠMonsanto":25769,"ĠAttempt":25770,"Ġcommanding":25771,"Ġfridge":25772,"ĠRin":25773,"ĠChess":25774,"uality":25775,"Ġol":25776,"Republican":25777,"ĠGlory":25778,"ĠWIN":25779,".......":25780,"agent":25781,"reading":25782,"Ġinh":25783,"Jones":25784,"Ġclicks":25785,"alan":25786,"Ġ[];":25787,"ĠMajesty":25788,"ĠCed":25789,"opus":25790,"atel":25791,"ê":25792,"ARC":25793,"ĠEcuador":25794,"ãĥł":25795,"ĠKuro":25796,"Ġrituals":25797,"Ġcaptive":25798,"Ġounce":25799,"Ġdisagreement":25800,"Ġslog":25801,"fuel":25802,"Pet":25803,"Mail":25804,"Ġexercised":25805,"Ġsolic":25806,"Ġrainfall":25807,"Ġdevotion":25808,"ĠAssessment":25809,"Ġrobotic":25810,"options":25811,"ĠRP":25812,"ĠFamilies":25813,"ĠFlames":25814,"Ġassignments":25815,"007":25816,"akedown":25817,"Ġvocabulary":25818,"Reilly":25819,"Ġcaval":25820,"gars":25821,"Ġsuppressed":25822,"ĠSET":25823,"ĠJohns":25824,"Ġwarp":25825,"broken":25826,"Ġstatues":25827,"Ġadvocated":25828,"Ġ275":25829,"Ġperil":25830,"omorph":25831,"ĠFemin":25832,"perfect":25833,"Ġhatch":25834,"Lib":25835,"512":25836,"Ġlifelong":25837,"313":25838,"Ġcheeks":25839,"Ġnumbered":25840,"ĠMug":25841,"Body":25842,"ravel":25843,"Weight":25844,"ĠJak":25845,"ĠHeath":25846,"Ġkissing":25847,"ĠJUST":25848,"Ġwaving":25849,"upload":25850,"Ġinsider":25851,"ĠProgressive":25852,"ĠFilter":25853,"tta":25854,"ĠBeam":25855,"Ġviolently":25856,"ipation":25857,"Ġskepticism":25858,"Ġ1918":25859,"ĠAnnie":25860,"ĠSI":25861,"Ġgenetics":25862,"Ġonboard":25863,"atl":25864,"ĠFriedman":25865,"ĠBri":25866,"ceptive":25867,"Ġpirate":25868,"ĠReporter":25869,"278":25870,"Ġmythology":25871,"Ġeclipse":25872,"Ġskins":25873,"Ġglyph":25874,"ingham":25875,"Files":25876,"Cour":25877,"women":25878,"Ġregimes":25879,"Ġphotographed":25880,"Kat":25881,"ĠMAX":25882,"Officials":25883,"Ġunexpectedly":25884,"Ġimpressions":25885,"Front":25886,";;;;;;;;":25887,"Ġsupremacy":25888,"Ġsang":25889,"Ġaggravated":25890,"Ġabruptly":25891,"ĠSector":25892,"Ġexcuses":25893,"Ġcosting":25894,"idepre
ss":25895,"Stack":25896,"ĠRNA":25897,"obil":25898,"Ġghosts":25899,"ldon":25900,"atibility":25901,"Topics":25902,"Ġreimburse":25903,"ĠHM":25904,"ĠDeg":25905,"Ġthief":25906,"yet":25907,"ogenesis":25908,"leaning":25909,"ĠKol":25910,"ĠBasketball":25911,"Ġfi":25912,"ĠSeeing":25913,"Ġrecycling":25914,"Ġ[-":25915,"Congress":25916,"Ġlectures":25917,"Psy":25918,"Ġnep":25919,"Ġmaid":25920,"Ġoriented":25921,"AX":25922,"Ġrespectful":25923,"rene":25924,"flush":25925,"ĠUnloaded":25926,"request":25927,"grid":25928,"ĠAlternatively":25929,"ĠHugo":25930,"Ġdecree":25931,"ĠBuddhism":25932,"andum":25933,"Android":25934,"ĠCongo":25935,"ĠJoyce":25936,"Ġacknowledging":25937,"hesive":25938,"ĠTomorrow":25939,"ĠHiro":25940,"thren":25941,"ĠMaced":25942,"Ġhoax":25943,"ĠIncreased":25944,"ĠPradesh":25945,"Wild":25946,"______":25947,"161":25948,"Ġaunt":25949,"Ġdistributing":25950,"ĠTucker":25951,"ĠSSL":25952,"ĠWolves":25953,"Building":25954,"oult":25955,"ĠLuo":25956,"ĠYas":25957,"ĠSpir":25958,"ĠShape":25959,"ĠCambod":25960,"ĠIPv":25961,"Ġml":25962,"Ġextrad":25963,"390":25964,"ĠPenny":25965,"dream":25966,"Ġstationed":25967,"optional":25968,"eworthy":25969,".":26700,"ĠWorkshop":26701,"ĠRetail":26702,"ĠAvatar":26703,"625":26704,"Na":26705,"ĠVC":26706,"ĠSecure":26707,"MY":26708,"1988":26709,"ossip":26710,"Ġprostate":26711,"Ġunden":26712,"Ġgamer":26713,"ĠContents":26714,"ĠWarhammer":26715,"ĠSentinel":26716,"310":26717,"Ġsegregation":26718,"ĠFlex":26719,"ĠMAY":26720,"Ġdrills":26721,"ĠDrugs":26722,"Islamic":26723,"Ġspur":26724,"Ġcafe":26725,"Ġimaginary":26726,"Ġguiding":26727,"Ġswings":26728,"ĠTheme":26729,"oby":26730,"Ġnud":26731,"Ġbegging":26732,"Ġstrongh":26733,"Ġrejecting":26734,"Ġpedestrians":26735,"ĠProspect":26736,"Rare":26737,"sle":26738,"Ġconcessions":26739,"ĠConstitutional":26740,"Ġbeams":26741,"Ġfibers":26742,"poon":26743,"Ġinstincts":26744,"property":26745,"ĠBIG":26746,"Sanders":26747,"imates":26748,"Ġcoating":26749,"Ġcorpses":26750,"ĠTRUE":26751,"checked":26752,"Ġ166":26753,"Ash":26754,"ĠJS":26755,"ĠFiction":26756,"Ġcommunal":26757,"Ġenergetic":26758,"oooooooo":26759,"Ġnowadays":26760,"ILD":26761,"ibo":26762,"ĠSUV":26763,"Ren":26764,"Ġdwelling":26765,"Silver":26766,"Ġtally":26767,"ĠMoving":26768,"Ġcoward":26769,"Ġgenerals":26770,"Ġhorns":26771,"Ġcirculated":26772,"Ġrobbed":26773,"ĠUnlimited":26774,"Ġharassed":26775,"Ġinhibit":26776,"Ġcomposer":26777,"ĠSpotify":26778,"Ġspreads":26779,"364":26780,"Ġsuicidal":26781,"Ġnoises":26782,"ĠStur":26783,"Ġsaga":26784,"ĠKag":26785,"iso":26786,"Ġtheoretically":26787,"Money":26788,"Ġsimilarity":26789,"Ġsliced":26790,"utils":26791,"inges":26792,"\"-":26793,"Ġanth":26794,"Ġimped":26795,"Module":26796,"Throughout":26797,"Ġmenus":26798,"committee":26799,"andi":26800,"obj":26801,"inav":26802,"fired":26803,"ĠAbdullah":26804,"Ġundead":26805,"Ġfonts":26806,"Hold":26807,"ENG":26808,"Ġsustainability":26809,"Ġflick":26810,"Ġrazor":26811,"ĠFest":26812,"ĠCharacters":26813,"Ġwording":26814,"Ġpopulist":26815,"Ġcriticizing":26816,"Ġmuse":26817,"vine":26818,"Ġcardboard":26819,"Ġkindly":26820,"Ġfringe":26821,"ĠTheft":26822,"icultural":26823,"Ġgovernors":26824,"Ġ����":26825,"Ġ163":26826,"Ġtimeout":26827,"ĠAuth":26828,"Children":26829,"AU":26830,"Ġredemption":26831,"ĠAlger":26832,"Ġ1914":26833,"Ġwaved":26834,"Ġastronauts":26835,"ograms":26836,"Ġswamp":26837,"ĠFinnish":26838,"Ġcandle":26839,"Ġtonnes":26840,"utm":26841,"Ġray":26842,"Ġspun":26843,"Ġfearful":26844,"articles":26845,"Ġcaus":26846,"orically":26847,"ĠRequires":26848,"ĠGol":26849,"Ġpope":26850,"Ġinaugural":26851,"Ġgle":26852,"ADA":26853,"ĠISI
L":26854,"ĠOffensive":26855,"Ġwatchdog":26856,"Ġbalcon":26857,"entity":26858,"ĠHoo":26859,"Ġgallon":26860,"ACC":26861,"Ġdoubling":26862,"Ġimplication":26863,"ĠSight":26864,"Ġdoctr":26865,"-------":26866,"Ġ\\\\":26867,"Ġmalt":26868,"Roll":26869,"Ġâī¥":26870,"Ġrecap":26871,"adding":26872,"uces":26873,"ĠBend":26874,"figure":26875,"Ġturkey":26876,"Ġsocietal":26877,"ĠTickets":26878,"Ġcommercially":26879,"Ġspicy":26880,"Ġ216":26881,"ĠRamp":26882,"Ġsuperiority":26883,"ï":26884,"ĠTracker":26885,"Carl":26886,"ĠCoy":26887,"ĠPatriot":26888,"Ġconsulted":26889,"Ġlistings":26890,"Ġslew":26891,"reenshot":26892,"ĠGone":26893,"Ġ[...]":26894,"309":26895,"Ġhottest":26896,"ر":26897,"Ġrocky":26898,"ĠDiaz":26899,"Ġmassage":26900,"Ġparaly":26901,"Ġpony":26902,"Az":26903,"Ġcartridge":26904,"ĠNZ":26905,"Ġsnack":26906,"ĠLamar":26907,"plement":26908,"ĠLeslie":26909,"Ġmater":26910,"Ġsnipp":26911,"246":26912,"Ġjointly":26913,"ĠBrisbane":26914,"ĠiPod":26915,"Ġpumping":26916,"Ġgoat":26917,"ĠSharon":26918,"ealing":26919,"Ġcoron":26920,"Ġanomal":26921,"rahim":26922,"ĠConnection":26923,"Ġsculpture":26924,"Ġscheduling":26925,"ĠDaddy":26926,"athing":26927,"Ġeyebrows":26928,"Ġcurved":26929,"Ġsentiments":26930,"Ġdrafting":26931,"Drop":26932,"([":26933,"Ġnominal":26934,"ĠLeadership":26935,"ĠGrow":26936,"Ġ176":26937,"Ġconstructive":26938,"ivation":26939,"Ġcorrupted":26940,"gerald":26941,"ĠCros":26942,"ĠChester":26943,"ĠLap":26944,"ãģª":26945,"OTH":26946,"DATA":26947,"Ġalmond":26948,"probably":26949,"Imp":26950,"Ġfeast":26951,"ĠWarcraft":26952,"Flor":26953,"Ġcheckpoint":26954,"Ġtranscription":26955,"Ġ204":26956,"Ġtweaks":26957,"Ġrelieve":26958,"Science":26959,"Ġperformer":26960,"Zone":26961,"Ġturmoil":26962,"igated":26963,"hibit":26964,"ĠCafe":26965,"themed":26966,"Ġfluor":26967,"bench":26968,"Ġdecom":26969,"ĠUnt":26970,"ĠBarrett":26971,"ĠFacts":26972,"Ġtasting":26973,"ĠPTSD":26974,"ĠSeal":26975,"ĠJudaism":26976,"ĠDynamic":26977,"ĠCors":26978,"Ve":26979,"ĠMing":26980,"ĠTransform":26981,"von":26982,"ĠDefenders":26983,"ĠTactical":26984,"ĠVon":26985,"ĠUnivers":26986,"Ġdistorted":26987,"ĠBreath":26988,"?'\"":26989,"Ġagon":26990,"ĠDeadly":26991,"Ġlan":26992,"ĠCycle":26993,"orned":26994,"Ġreliably":26995,"Ġglor":26996,"ĠMonkey":26997,"ãĥ¡":26998,"Ġadren":26999,"Ġmicrowave":27000,"ĠAlban":27001,"ircraft":27002,"digit":27003,"smart":27004,"ĠDread":27005,"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯":27006,"{{":27007,"ĠRochester":27008,"Ġsimplified":27009,"Ġinflicted":27010,"Ġtakeover":27011,"Ġyourselves":27012,"aditional":27013,"Ġmuscular":27014,"KS":27015,"Ġingen":27016,"Tax":27017,"ĠFeature":27018,"277":27019,"Ġcruc":27020,"Ġcrate":27021,"Ġunidentified":27022,"Ġacclaimed":27023,"ĠManga":27024,"ĠFrances":27025,"ĠNepal":27026,"ĠGerald":27027,"ĠKuwait":27028,"Ġslain":27029,"ĠHeb":27030,"ĠGoku":27031,"ãģ®æ":27032,"286":27033,"Mrs":27034,"ĠCody":27035,"ĠSanctuary":27036,"016":27037,"Ġdismant":27038,"Ġdataset":27039,"ĠHond":27040,"buck":27041,"ĠPatterson":27042,"Ġpalette":27043,"ĠGD":27044,"icol":27045,"ĠLodge":27046,"Ġplanetary":27047,"akin":27048,"ĠRegistered":27049,"abwe":27050,"ĠPetersburg":27051,"Ġhailed":27052,"ĠPiece":27053,"Sche":27054,"ĠDOJ":27055,"Ġenumer":27056,"181":27057,"ĠObserver":27058,"ĠBold":27059,"founded":27060,"commerce":27061,"Ġexploits":27062,"ĠFinding":27063,"URN":27064,"ĠSne":27065,"ĠAcid":27066,"ayette":27067,"ĠValues":27068,"Ġdrastic":27069,"Ġarchitectural":27070,"Ġ\".":27071,"×ķ":27072,"umped":27073,"Ġwrapping":27074,"Ġwidow":27075,"ĠSlayer":27076,"lace":27077,"once":27078,"Germany":27079,"avoid":27080,"Ġtemples":27081,"PAR":27082,"ô":27083,"
ĠLucifer":27084,"ĠFlickr":27085,"lov":27086,"forces":27087,"Ġscouting":27088,"Ġlouder":27089,"tesy":27090,"Ġbeforehand":27091,"Äĵ":27092,"ĠNeon":27093,"ĠWol":27094,"ĠTypically":27095,"ĠPolitico":27096,"-+-+":27097,"Ġbuilder":27098,"Ġderive":27099,"Kill":27100,"Ġpoker":27101,"Ġambiguous":27102,"Ġlifts":27103,"Ġcyt":27104,"Ġribs":27105,"oodle":27106,"ĠSounds":27107,"hair":27108,"ĠSyndrome":27109,"tf":27110,"Ġproportional":27111,"uid":27112,"Ġpertaining":27113,"ĠKindle":27114,"ĠNegro":27115,"Ġreiterated":27116,"ĠTonight":27117,"oths":27118,"ĠCornell":27119,"Ġowing":27120,"Ġ208":27121,"elfare":27122,"ocating":27123,"ĠBirds":27124,"Subscribe":27125,"Ġessays":27126,"Ġburdens":27127,"Ġillustrations":27128,"arious":27129,"ERAL":27130,"ĠCalcul":27131,"Ġxen":27132,"ĠLinkedIn":27133,"ĠJung":27134,"Ġredesign":27135,"Connor":27136,"296":27137,"Ġreversal":27138,"ĠAdelaide":27139,"ĠLL":27140,"Ġsinking":27141,"Ġgum":27142,"USH":27143,"capt":27144,"ĠGrimm":27145,"Ġfootsteps":27146,"ĠCBD":27147,"ispers":27148,"Ġprose":27149,"Wednesday":27150,"ĠMovies":27151,"edin":27152,"Ġoverturned":27153,"Ġcontentious":27154,"USB":27155,"~~~~~~~~~~~~~~~~":27156,"ĠCopper":27157,"Ġpointless":27158,"NV":27159,"values":27160,"olphin":27161,"dain":27162,"Ġdeposited":27163,"ĠGW":27164,"Ġpreceded":27165,"ĠCla":27166,"ĠGolem":27167,"ĠNim":27168,"Ġβ":27169,"ĠEngineers":27170,"middle":27171,"Ġflatt":27172,"operative":27173,"Ġcouncils":27174,"imbabwe":27175,"elin":27176,"Ġstressful":27177,"ĠLD":27178,"Ġresh":27179,"lake":27180,"Ġwheelchair":27181,"ĠAlternative":27182,"Ġoptimize":27183,"operation":27184,"Ġpeek":27185,"Ġoneself":27186,"igil":27187,"Ġtransitions":27188,"opathy":27189,"blank":27190,"Ġ169":27191,"171":27192,"________________________________________________________________":27193,"Ġlaundering":27194,"Enc":27195,"ĠDEC":27196,"Ġworkouts":27197,"Ġspikes":27198,"Ġdinosaurs":27199,"Ġdiscriminatory":27200,"Pool":27201,"Rather":27202,"385":27203,"RNA":27204,"testers":27205,"eto":27206,"ĠIdentity":27207,"Ġvein":27208,"ĠBurton":27209,"Ġarcade":27210,"420":27211,"Ultimately":27212,"ĠSadly":27213,"ð":27214,"pill":27215,"Ġcubic":27216,"ĠSpectrum":27217,"these":27218,"states":27219,"Ġunofficial":27220,"hawks":27221,"ĠEVERY":27222,"Ġrainbow":27223,"Ġincarceration":27224,"anding":27225,"Ġsyll":27226,"ĠEverton":27227,"Ġ179":27228,"ĠSerbia":27229,"Ġ189":27230,"meter":27231,"ĠMickey":27232,"Ġantiqu":27233,"Ġfactual":27234,"neck":27235,"ĠNare":27236,"norm":27237,"must":27238,"Ġhighways":27239,"Ġglam":27240,"Ġdividing":27241,"ĠSquadron":27242,"ĠMartha":27243,"Ġbirths":27244,"Cover":27245,"////////////////":27246,"ĠWong":27247,"Phot":27248,"ĠALS":27249,"rio":27250,"ĠNonetheless":27251,"ĠLemon":27252,"Ġ206":27253,"ĠEE":27254,"Ġderivative":27255,"ĠWWII":27256,"vote":27257,"Ġtherein":27258,"Ġseparating":27259,"446":27260,"sync":27261,"ĠStreets":27262,"Ġratt":27263,"Ġmunicipality":27264,"ĠShortly":27265,"Ġmonk":27266,"),\"":27267,"Ġscrub":27268,"Ġoperatives":27269,"Neither":27270,"Place":27271,"ĠLimit":27272,"Female":27273,"ĠActor":27274,"Character":27275,"Ġconstituted":27276,"357":27277,"Ġprotested":27278,"ĠStraw":27279,"ĠHeight":27280,"ilda":27281,"ĠTyph":27282,"Ġfloods":27283,"Ġcosmetic":27284,"WAY":27285,"perture":27286,"upon":27287,"tons":27288,"essing":27289,"ĠPocket":27290,"Ġrooft":27291,"ĠCaucas":27292,"Ġantidepress":27293,"Ġincompatible":27294,"ECD":27295,"Ġopera":27296,"ĠContest":27297,"Ġgenerators":27298,"lime":27299,"Defense":27300,"1987":27301,"forum":27302,"Ġsavage":27303,"ĠHungarian":27304,"nz":27305,"Ġmetallic":27306,"Ġexpelled":27
307,"Ġresidency":27308,"Ġdresses":27309,"666":27310,"ĠClement":27311,"fires":27312,"Category":27313,"Ġgeek":27314,"alis":27315,"Ġcemetery":27316,"educated":27317,"Ġcrawl":27318,"ĠUnable":27319,"ĠTyson":27320,"akis":27321,"Ġpardon":27322,"ĠWra":27323,"Ġstrengthened":27324,"ĠFors":27325,"335":27326,"ĠHC":27327,"ĠMond":27328,"Ġvisuals":27329,"ĠBeatles":27330,"ettlement":27331,"Ġï":27332,"gro":27333,"Ġbash":27334,"Ġpoorest":27335,"Ġexcel":27336,"Ġaspirations":27337,"ĠMunicip":27338,"ensible":27339,"Ġceremonies":27340,"Ġintimidation":27341,"ĠCONTR":27342,"beck":27343,"ĠKap":27344,"asu":27345,"Ġtrademarks":27346,"ĠSew":27347,"ĠCompetition":27348,"network":27349,"ĠArri":27350,"ĠTet":27351,"Roaming":27352,"WC":27353,"Dat":27354,"Ġsob":27355,"Ġpairing":27356,"Ġoverdose":27357,"SAY":27358,"aber":27359,"Ġrevolt":27360,"ĠFah":27361,"acting":27362,"eq":27363,"estation":27364,"Fight":27365,"ĠMarks":27366,"273":27367,"Ġ178":27368,"Raw":27369,"ãģĭ":27370,"349":27371,"blocks":27372,"Ġverge":27373,"estine":27374,"ĠPodesta":27375,"Ġinvasive":27376,"Ġprofoundly":27377,"ĠAo":27378,"each":27379,"Ġlest":27380,"interpret":27381,"Ġshrinking":27382,"Ġerrone":27383,"Ġchees":27384,"lys":27385,"ĠIvy":27386,"ĠDirectory":27387,"Ġhinted":27388,"VICE":27389,"Ġcontacting":27390,"ĠGent":27391,"hei":27392,"Ġlabeling":27393,"Ġmercury":27394,"ĠLite":27395,"Ġexpires":27396,"Ġdestabil":27397,"ritis":27398,"cu":27399,"Ġfeathers":27400,"Ġsteer":27401,"Ġprogrammed":27402,"ĠVader":27403,"Going":27404,"ĠElim":27405,"Ġyo":27406,"ĠMiche":27407,"Ġ203":27408,"Ġsleeves":27409,"Ġbully":27410,"ĠHumans":27411,"368":27412,"Ġcompress":27413,"ĠBanner":27414,"ARS":27415,"Ġawhile":27416,"Ġcalib":27417,"Ġsponsorship":27418,"ĠDifficulty":27419,"ĠPapers":27420,"Ġidentifier":27421,"}.":27422,"Ġyog":27423,"ĠShia":27424,"Ġcleanup":27425,"Ġvibe":27426,"introdu":27427,"imming":27428,"Australia":27429,"Ġoutlines":27430,"ĠYoutube":27431,"train":27432,"ĠMakes":27433,"Ġdeported":27434,"Ġcentr":27435,"ĠDug":27436,"ĠBoulder":27437,"ĠBuffy":27438,"Ġinjunction":27439,"ĠHarley":27440,"ĠGroups":27441,"ĠDumbledore":27442,"ĠClara":27443,"Ġ\"-":27444,"Ġsacrificed":27445,"eph":27446,"Shadow":27447,"ibling":27448,"Ġfreelance":27449,"Ġevidently":27450,"phal":27451,"Ġretains":27452,"Mir":27453,"Ġfinite":27454,"dar":27455,"ĠCous":27456,"Ġrepaired":27457,"Ġperiodic":27458,"Ġchampionships":27459,"Ġasteroid":27460,"blind":27461,"Ġexpressly":27462,"ĠAstros":27463,"Ġscaled":27464,"Ġgeographical":27465,"ĠRapids":27466,"Enjoy":27467,"Ġelastic":27468,"ĠMohamed":27469,"Market":27470,"begin":27471,"Ġdiscovers":27472,"Ġtelecommunications":27473,"Ġscanner":27474,"Ġenlarge":27475,"Ġsharks":27476,"Ġpsychedel":27477,"ĠRouge":27478,"Ġsnapshot":27479,"isine":27480,"XP":27481,"Ġpesticides":27482,"ĠLSD":27483,"ĠDistribution":27484,"really":27485,"Ġdegradation":27486,"Ġdisguise":27487,"Ġbiom":27488,"ĠEXT":27489,"Ġequations":27490,"Ġhazards":27491,"ĠCompared":27492,")*":27493,"Ġvirtues":27494,"Ġelders":27495,"Ġenhancing":27496,"ĠAcross":27497,"eros":27498,"angling":27499,"Ġcombust":27500,"ucci":27501,"Ġconcussion":27502,"Ġcontraception":27503,"ĠKang":27504,"Ġexpresses":27505,"Ġaux":27506,"ĠPione":27507,"Ġexhibits":27508,"Debug":27509,"OTAL":27510,"ĠAlready":27511,"ĠWheeler":27512,"Ġexpands":27513,"?:":27514,"Ġreconciliation":27515,"Ġpirates":27516,"Ġpurse":27517,"Ġdiscourage":27518,"Ġspectacle":27519,"Rank":27520,"Ġwraps":27521,"ĠThought":27522,"Ġimpending":27523,"Opp":27524,"ĠAnglo":27525,"ĠEUR":27526,"Ġscrewed":27527,"retched":27528,"Ġencouragement":27529,"models":27530,"Ġconfuse":27531,"mmm
":27532,"ĠVitamin":27533,"âĸijâĸij":27534,"Cru":27535,"Ġknights":27536,"Ġdiscard":27537,"Ġbishops":27538,"ĠWear":27539,"ĠGarrett":27540,"kan":27541,"ãĥŁ":27542,"Ġmasculine":27543,"capital":27544,"ĠAus":27545,"Ġfatally":27546,"thanks":27547,"ĠAU":27548,"ĠGut":27549,"1200":27550,"Ġ00000000":27551,"Ġsurrog":27552,"ĠBIOS":27553,"raits":27554,"ĠWatts":27555,"Ġresurrection":27556,"ĠElectoral":27557,"ĠTips":27558,"4000":27559,"Ġnutrient":27560,"Ġdepicting":27561,"Ġsprink":27562,"Ġmuff":27563,"ĠLIM":27564,"ĠSample":27565,"psc":27566,"ibi":27567,"generated":27568,"Ġspecimens":27569,"Ġdissatisf":27570,"Ġtailored":27571,"Ġholdings":27572,"ĠMonthly":27573,"ĠEat":27574,"poons":27575,"Ġnec":27576,"ĠCage":27577,"ĠLotus":27578,"ĠLantern":27579,"Ġfrontier":27580,"Ġpensions":27581,"Ġjoked":27582,"ĠHardy":27583,"=-=-=-=-":27584,"rade":27585,"UID":27586,"Ġrails":27587,"Ġemit":27588,"Ġslate":27589,"Ġsmug":27590,"Ġspit":27591,"ĠCalls":27592,"ĠJacobs":27593,"feat":27594,"ĠUE":27595,"Ġrestruct":27596,"Ġregeneration":27597,"Ġenergies":27598,"ĠConnor":27599,"OHN":27600,"ĠCheese":27601,"Ġger":27602,"Ġresurrect":27603,"management":27604,"NW":27605,"Ġpresently":27606,"ĠBruins":27607,"Member":27608,"ĠMang":27609,"idan":27610,"Ġboosting":27611,"wyn":27612,"+.":27613,"requisite":27614,"ĠNYPD":27615,"ĠMegan":27616,"ĠConditions":27617,"Ġpics":27618,"nesium":27619,"ĠRash":27620,"Ġ174":27621,"ĠDucks":27622,"Ġembro":27623,"zu":27624,"onian":27625,"religious":27626,"Ġcraz":27627,"ĠACA":27628,"ĠZucker":27629,"EMA":27630,"ĠPros":27631,"Weapon":27632,"ĠKnox":27633,"ĠArduino":27634,"Ġstove":27635,"Ġheavens":27636,"ĠPurchase":27637,"Ġherd":27638,"Ġfundraiser":27639,"Digital":27640,"5000":27641,"Ġproponents":27642,"/âĢĭ":27643,"Ġjelly":27644,"ĠVisa":27645,"Ġmonks":27646,"Ġadvancement":27647,"ĠWer":27648,"Ġ187":27649,"eus":27650,"ertility":27651,"Ġfetal":27652,"Ġ1936":27653,"Lo":27654,"Ġoutfits":27655,"Ġstaircase":27656,"bomb":27657,"Ġcustomized":27658,"clair":27659,"Tree":27660,"Ġmapped":27661,"ĠConsidering":27662,"ĠTorres":27663,"Ġmethyl":27664,"Ġapproximate":27665,"Ġdoom":27666,"ĠHansen":27667,"Ġcrossover":27668,"Ġstandalone":27669,"ä¼":27670,"Ġinvites":27671,"Ġgraveyard":27672,"Ġhp":27673,"DonaldTrump":27674,"Ġescort":27675,"Gar":27676,"Ġpredecessors":27677,"Ġhay":27678,"Ġenzyme":27679,"ĠStraight":27680,"visors":27681,"Ing":27682,"aneously":27683,"ĠApplied":27684,"Ġfec":27685,"ĠDurant":27686,"Ġoutspoken":27687,"orb":27688,"Ġzeal":27689,"Ġdisgrace":27690,"').":27691,"ĠCheng":27692,"289":27693,"ĠRena":27694,"ĠSuicide":27695,"294":27696,"Ġoutraged":27697,"ĠNewman":27698,"ĠNvidia":27699,"ĠAber":27700,"ĠBers":27701,"Ġrecreation":27702,"Window":27703,"ĠDP":27704,"xe":27705,"Ġpedoph":27706,"Ġfallout":27707,"amboo":27708,"Ġpresentations":27709,"ĠApps":27710,"Ġhtml":27711,"345":27712,"ĠXXX":27713,"Ġrubbing":27714,"ĠLeather":27715,"Ġhumidity":27716,"seys":27717,"established":27718,"ĠUnits":27719,"646":27720,"Ġrespectable":27721,"Auto":27722,"Ġthriving":27723,"ĠInnovation":27724,"angs":27725,"Extra":27726,"regulation":27727,"298":27728,"pick":27729,"Examples":27730,"ĠCJ":27731,"Attack":27732,"Ġdracon":27733,"LT":27734,"Ġsticker":27735,"rers":27736,"Ġsunny":27737,"Iss":27738,"regulated":27739,"dim":27740,"ĠAbstract":27741,"Ġhusbands":27742,"Office":27743,"omination":27744,"itars":27745,"ANGE":27746,"ascal":27747,"ĠKris":27748,"ĠInfantry":27749,"Ġmalf":27750,"ĠAthe":27751,"ĠRally":27752,"balanced":27753,"........................":27754,"OUP":27755,"Ġmolecule":27756,"metics":27757,"ĠSplit":27758,"ĠInstructions":27759,"ĠNights":27760,"cards":277
61,"Ġtug":27762,"Ġcone":27763,"åŃ":27764,"Ġtx":27765,"ĠDiscussion":27766,"Ġcatastrophe":27767,"ppe":27768,"gio":27769,"Ġcommunism":27770,"Ġhalted":27771,"ĠGuant":27772,"clean":27773,"ĠSched":27774,"ĠKanye":27775,"Ġwander":27776,"ĠSeriously":27777,"Ġ188":27778,"ennial":27779,"follow":27780,"productive":27781,"ĠFlow":27782,"ĠSail":27783,"Ġcraw":27784,"Ġsimulations":27785,"oru":27786,"angles":27787,"ĠNolan":27788,"Ġmenstru":27789,"470":27790,"Ġ207":27791,"aja":27792,"Ġcasually":27793,"boarding":27794,"Ġ222":27795,"ovy":27796,"ĠNumbers":27797,"umat":27798,"OE":27799,"287":27800,"ĠClemson":27801,"Ġcerts":27802,"Ġslid":27803,"ĠTribe":27804,"Ġtoast":27805,"Ġfortunes":27806,"Ġfals":27807,"ĠCommittees":27808,"Ġgp":27809,"Ġfiery":27810,"ĠNets":27811,"ĠAnime":27812,"Package":27813,"ĠCompare":27814,"laughter":27815,"infect":27816,"Ġatrocities":27817,"Ġjustices":27818,"Ġinsults":27819,"ĠVernon":27820,"Ġshaken":27821,"Ġpersona":27822,"estamp":27823,"367":27824,"brain":27825,"Ġexperimenting":27826,"Ken":27827,"ĠElectronics":27828,"Ġ161":27829,"domain":27830,"Ġgraphical":27831,"bishop":27832,"Ġwhopping":27833,"ĠEvangel":27834,"Ġadvertisers":27835,"ĠSpear":27836,"Ġbids":27837,"Ġdestroys":27838,"utz":27839,"Ġundersc":27840,"ĠADD":27841,"Ġants":27842,"ĠCum":27843,"ipples":27844,"ĠFill":27845,"Ġglanced":27846,"Ġindicted":27847,"ĠEff":27848,"Ġmiscon":27849,"ĠDesktop":27850,"Ġabide":27851,"ãĥĢ":27852,"ĠIo":27853,"ĠCoul":27854,"Ġcapsule":27855,"ĠChrys":27856,"MON":27857,"Ġundes":27858,"ĠIRA":27859,"Ġcitation":27860,"Ġdictate":27861,"ĠNetworks":27862,"ĠConflict":27863,"ĠStuff":27864,"xa":27865,"isec":27866,"ĠChemistry":27867,"Ġquarterly":27868,"Williams":27869,"anan":27870,"Opt":27871,"ĠAlexandria":27872,"outheastern":27873,"ĠSpringfield":27874,"ĠBlacks":27875,"Ġgeography":27876,"242":27877,"Ġutmost":27878,"ĠExxon":27879,"abouts":27880,"EVA":27881,"ĠEnable":27882,"ĠBarr":27883,"Ġdisagreed":27884,"ĠCyprus":27885,"Ġdementia":27886,"Ġlabs":27887,"Ġubiquitous":27888,"ĠLOVE":27889,"Ġconsolidated":27890,"sr":27891,"Ġcreamy":27892,"ĠTimber":27893,"Regardless":27894,"ĠCertificate":27895,"Ġ\"...":27896,"ogenous":27897,"Captain":27898,"Ġinsulting":27899,"ĠSoros":27900,"ĠInstr":27901,"ĠBulgaria":27902,"better":27903,"Ġsucking":27904,"ĠDavidson":27905,"atz":27906,"Ġcollateral":27907,"gif":27908,"Ġplagued":27909,"ĠCancel":27910,"ĠGardner":27911,"RB":27912,"Ġsixteen":27913,"Remove":27914,"uristic":27915,"cook":27916,"Rod":27917,"Ġcomprising":27918,"fle":27919,")âĢĶ":27920,"ĠViking":27921,"growth":27922,"agonal":27923,"Ġsrf":27924,"afety":27925,"mot":27926,"Nearly":27927,"stown":27928,"ĠFactor":27929,"Ġautomobile":27930,"Ġprocedural":27931,"mask":27932,"ampires":27933,"Ġdisappears":27934,"jab":27935,"315":27936,"Ġ1951":27937,"needed":27938,"Ġdaring":27939,"leader":27940,"Ġpodium":27941,"Ġunhealthy":27942,"Ġmund":27943,"Ġpyramid":27944,"ocre":27945,"Ġkissed":27946,"Ġdreamed":27947,"ĠFantastic":27948,"ĠGly":27949,"åĬ":27950,"Ġgreatness":27951,"Ġspices":27952,"Ġmetropolitan":27953,"Ġcompuls":27954,"iets":27955,"1016":27956,"ĠSham":27957,"ĠPyr":27958,"flies":27959,"ĠMidnight":27960,"Ġswallowed":27961,"Ġgenres":27962,"ĠLucky":27963,"ĠRewards":27964,"Ġdispatch":27965,"ĠIPA":27966,"ĠApply":27967,"Ġaven":27968,"alities":27969,"312":27970,"things":27971,"Ġ().":27972,"Ġmates":27973,"ĠSz":27974,"ĠCOP":27975,"olate":27976,"OFF":27977,"Ġrecharge":27978,"caps":27979,"ĠYorker":27980,"icone":27981,"Ġgalaxies":27982,"ileaks":27983,"Dave":27984,"ĠPuzz":27985,"ĠCeltic":27986,"ĠAFC":27987,"276":27988,"ĠSons":27989,"Ġaffirmative":27990,"Hor":27991,"
Ġtutorials":27992,"ĠCITY":27993,"ĠRosa":27994,"ĠExtension":27995,"Series":27996,"Ġfats":27997,"Ġrab":27998,"lis":27999,"Ġunic":28000,"Ġeve":28001,"ĠSpin":28002,"Ġadulthood":28003,"typ":28004,"Ġsectarian":28005,"Ġcheckout":28006,"ĠCycl":28007,"Single":28008,"Ġmartyr":28009,"Ġchilling":28010,"888":28011,"oufl":28012,"Ġ];":28013,"Ġcongestion":28014,"mk":28015,"ĠWhereas":28016,"Ġ1938":28017,"urrencies":28018,"erion":28019,"Ġboast":28020,"ĠPatients":28021,"Ġchap":28022,"ĠBD":28023,"realDonaldTrump":28024,"Ġexamines":28025,"hov":28026,"Ġstartling":28027,"ĠBabylon":28028,"wid":28029,"omew":28030,"brance":28031,"ĠOdyssey":28032,"wig":28033,"Ġtorch":28034,"ĠVox":28035,"ĠMoz":28036,"ĠTroll":28037,"ĠAns":28038,"Similarly":28039,"ĠFul":28040,"006":28041,"Unless":28042,"ĠAlone":28043,"stead":28044,"ĠPublisher":28045,"rights":28046,"tu":28047,"ĠDoesn":28048,"Ġprofessionally":28049,"Ġclo":28050,"icz":28051,"Ġsteals":28052,"Ġá":28053,"1986":28054,"Ġsturdy":28055,"ĠJohann":28056,"Ġmedals":28057,"Ġfilings":28058,"ĠFraser":28059,"done":28060,"Ġmultinational":28061,"Ġfeder":28062,"Ġworthless":28063,"Ġpest":28064,"Yesterday":28065,"ankind":28066,"Ġgays":28067,"Ġborne":28068,"ĠPOS":28069,"Picture":28070,"Ġpercentages":28071,"251":28072,"rame":28073,"Ġpotions":28074,"AMD":28075,"ĠLebanese":28076,"Ġrang":28077,"ĠLSU":28078,"ongs":28079,"Ġpeninsula":28080,"ĠClause":28081,"ALK":28082,"oha":28083,"ĠMacBook":28084,"Ġunanimous":28085,"Ġlenders":28086,"Ġhangs":28087,"Ġfranchises":28088,"orers":28089,"ĠUpdates":28090,"Ġisolate":28091,"andro":28092,"Soon":28093,"Ġdisruptive":28094,"ĠSurve":28095,"Ġstitches":28096,"ĠScorp":28097,"ĠDominion":28098,"Ġsupplying":28099,"Arg":28100,"Ġturret":28101,"ĠLuk":28102,"Ġbrackets":28103,"*)":28104,"ĠRevolutionary":28105,"ĠHonest":28106,"Ġnoticing":28107,"ĠShannon":28108,"Ġafforded":28109,"Ġtha":28110,"ĠJanet":28111,"!--":28112,"ĠNarendra":28113,"ĠPlot":28114,"Hol":28115,"sever":28116,"eenth":28117,"Ġobstruction":28118,"Ġ1024":28119,"staff":28120,"jas":28121,"orget":28122,"scenes":28123,"laughs":28124,"ĠFargo":28125,"crime":28126,"Ġorchestr":28127,"Ġdelet":28128,"iliary":28129,"rieved":28130,"Ġmilitar":28131,"ĠGreene":28132,"âĹı":28133,"ãģ¦":28134,"ĠGuards":28135,"Ġunleashed":28136,"ĠWeber":28137,"Ġadjustable":28138,"Ġcaliber":28139,"Ġmotivations":28140,"ĠÃł":28141,"mAh":28142,"ĠLanka":28143,"handle":28144,"Ġpent":28145,"ĠRav":28146,"ĠAngular":28147,"ĠKau":28148,"umbing":28149,"Ġphilanthrop":28150,"Ġdehyd":28151,"Ġtoxicity":28152,"eer":28153,"ĠYORK":28154,"witz":28155,"å¼":28156,"ĠIE":28157,"community":28158,"ĠAH":28159,"Ġretali":28160,"Ġmassively":28161,"ĠDaniels":28162,"ĠDEL":28163,"Ġcarcin":28164,"Url":28165,"Ġrouting":28166,"ĠNPCs":28167,"ĠRAF":28168,"ryce":28169,"Ġwaived":28170,"ĠGuatem":28171,"Everybody":28172,"Ġcovenant":28173,"Ġ173":28174,"Ġrelaxing":28175,"Ġquart":28176,"almost":28177,"Ġguarded":28178,"ĠSoldiers":28179,"ĠPLAY":28180,"Ġoutgoing":28181,"LAND":28182,"Ġrewrite":28183,"ĠMOV":28184,"ĠImper":28185,"ĠSolution":28186,"Ġphenomenal":28187,"Ġlongevity":28188,"Ġimpat":28189,"ĠNissan":28190,"irie":28191,"Ġodor":28192,"ĠZar":28193,"oks":28194,"Ġmilitias":28195,"ĠSPEC":28196,"Ġtolerated":28197,"arser":28198,"ĠBradford":28199,"+,":28200,"Ġsurreal":28201,"sf":28202,"Canadian":28203,"Ġresemblance":28204,"Ġcarbohydrate":28205,"VIEW":28206,"Ġaccessory":28207,"meal":28208,"largest":28209,"iegel":28210,"Someone":28211,"Ġtoughest":28212,"oso":28213,"Ġfunnel":28214,"Ġcondemnation":28215,"luent":28216,"Ġwired":28217,"ĠSunset":28218,"Jesus":28219,"ĠPST":28220,"ĠPages":28221,"ĠTycoon":2822
2,"ĠPF":28223,"Ġselections":28224,"Ġà¤":28225,"partisan":28226,"Ġhighs":28227,"ĠRune":28228,"Ġcrafts":28229,"lead":28230,"ĠParents":28231,"Ġreclaim":28232,"eker":28233,"ĠAllied":28234,"aeper":28235,"Ġlooming":28236,"Ġbeneficiaries":28237,"ĠHull":28238,"Students":28239,"Jewish":28240,"dj":28241,"Ġpact":28242,"template":28243,"ĠOfficials":28244,"ĠBaylor":28245,"Ġhemp":28246,"Ġyouths":28247,"ĠLevels":28248,"ĠXiao":28249,"ĠChes":28250,"Ġendeavor":28251,"ĠRemoved":28252,"Ġhippocamp":28253,"Hell":28254,"ãĤĬ":28255,"805":28256,"Ġdinosaur":28257,"ĠWrath":28258,"ĠIndonesian":28259,"Ġcalculator":28260,"ĠDictionary":28261,"Ġ420":28262,"ĠMAG":28263,"(_":28264,"!,":28265,"tarians":28266,"Ġrestricting":28267,"racuse":28268,"Ġweekday":28269,"OUNT":28270,"Ġshrugged":28271,"leground":28272,"Ġbald":28273,"ĠDoctors":28274,"Ġtouted":28275,"ĠMaxwell":28276,"Ġ214":28277,"Ġdiplomat":28278,"Ġrepression":28279,"Ġconstituency":28280,"vice":28281,"ranked":28282,"ĠNapoleon":28283,"gang":28284,"ĠForever":28285,"tun":28286,"Ġbulb":28287,"ĠPDT":28288,"ĠCisco":28289,"VEN":28290,"Ġresumed":28291,"Steven":28292,"ĠManitoba":28293,"Ġfabulous":28294,"ĠAgents":28295,"1984":28296,"Ġamusing":28297,"ĠMysteries":28298,"Ġorthodox":28299,"floor":28300,"Ġquestionnaire":28301,"Ġpenetrate":28302,"Ġfilmmakers":28303,"ĠUnc":28304,"Ġstamped":28305,"Ġthirteen":28306,"Ġoutfield":28307,"Ġforwarded":28308,"Ġappra":28309,"Ġaided":28310,"try":28311,"Ġunfocused":28312,"ĠLiz":28313,"ĠWendy":28314,"ĠScene":28315,"Charg":28316,"Ġrejects":28317,"Ġleftist":28318,"ĠProvidence":28319,"ĠBrid":28320,"regn":28321,"Ġprophecy":28322,"ĠLIVE":28323,"499":28324,"Ġforge":28325,"ĠFML":28326,"Ġintrinsic":28327,"ĠFrog":28328,"Ġwont":28329,"ĠHolt":28330,"Ġfamed":28331,"CLUS":28332,"aepernick":28333,"ĠHate":28334,"ĠCay":28335,"Ġregistering":28336,"ortality":28337,"ropy":28338,"ocalyptic":28339,"aan":28340,"nav":28341,"Ġfascist":28342,"IFIED":28343,"Ġimplicated":28344,"ĠResort":28345,"ĠChandler":28346,"ĠBrick":28347,"Pin":28348,"ysc":28349,"Usage":28350,"ĠHelm":28351,"usra":28352,"âĺħâĺħ":28353,"ĠAbbas":28354,"Ġunanimously":28355,"Ġkeeper":28356,"Ġaddicted":28357,"???":28358,"Ġhelmets":28359,"Ġantioxid":28360,"apsed":28361,"808":28362,"giene":28363,"Ġwaits":28364,"Ġminion":28365,"raved":28366,"ĠPorsche":28367,"Ġdreaming":28368,"Ġ171":28369,"ĠCain":28370,"Ġunfor":28371,"asso":28372,"ĠConfiguration":28373,"kun":28374,"hardt":28375,"Ġnested":28376,"ĠLDS":28377,"LES":28378,"Ġtying":28379,"enos":28380,"Ġcue":28381,"ĠMarqu":28382,"skirts":28383,"Ġclicked":28384,"Ġexpiration":28385,"ĠAccordingly":28386,"ĠWC":28387,"Ġblessings":28388,"Ġaddictive":28389,"ĠNarr":28390,"yx":28391,"ĠJaguars":28392,"Ġrents":28393,"ĠSiber":28394,"Ġtipped":28395,"ousse":28396,"ĠFitzgerald":28397,"Ġhierarch":28398,"outine":28399,"Ġwavelength":28400,">.":28401,"chid":28402,"ĠProcessing":28403,"/+":28404,"ranking":28405,"Easy":28406,"ĠConstruct":28407,"Ġtet":28408,"insured":28409,"HUD":28410,"Ġquoting":28411,"Ġcommunicated":28412,"inx":28413,"Ġinmate":28414,"Ġerected":28415,"ĠAbsolutely":28416,"ĠSurely":28417,"Ġunim":28418,"ĠThrone":28419,"heid":28420,"Ġclaws":28421,"Ġsuperstar":28422,"ĠLenn":28423,"ĠWhis":28424,"Uk":28425,"abol":28426,"Ġsket":28427,"ĠNiet":28428,"Ġperks":28429,"Ġaffinity":28430,"Ġopenings":28431,"phasis":28432,"Ġdiscriminate":28433,"Tip":28434,"vc":28435,"Ġgrinding":28436,"ĠJenny":28437,"Ġasthma":28438,"holes":28439,"ĠHomer":28440,"Ġregisters":28441,"ĠGlad":28442,"Ġcreations":28443,"Ġlithium":28444,"Ġapplause":28445,"until":28446,"Justice":28447,"ĠTurks":28448,"Ġscandals":28449,"Ġbake
":28450,"tank":28451,"Mech":28452,"ĠMeans":28453,"ĠMaid":28454,"Republicans":28455,"isal":28456,"windows":28457,"ĠSantos":28458,"Ġvegetation":28459,"338":28460,"tri":28461,"Ġflux":28462,"insert":28463,"Ġclarified":28464,"Ġmortg":28465,"ĠChim":28466,"ĠTort":28467,"Ġdisclaim":28468,"metal":28469,"ĠAside":28470,"Ġinduction":28471,"Ġinfl":28472,"Ġatheists":28473,"amph":28474,"Ġether":28475,"ĠVital":28476,"ĠBuilt":28477,"Mind":28478,"Ġweaponry":28479,"SET":28480,"Ġ186":28481,"admin":28482,"gam":28483,"contract":28484,"afa":28485,"Ġderivatives":28486,"Ġsnacks":28487,"Ġchurn":28488,"Econom":28489,"Ġcapped":28490,"ĠUnderstanding":28491,"ĠHers":28492,"ĠIz":28493,"Ġduct":28494,"IENT":28495,"aughty":28496,"ĠâľĶ":28497,"ĠNP":28498,"Ġsailing":28499,"Initialized":28500,"Ġted":28501,"Ġreactors":28502,"ĠLomb":28503,"Ġchoke":28504,"ĠWorm":28505,"Ġadmiration":28506,"Ġswung":28507,"ensibly":28508,"Ġrash":28509,"ĠGoals":28510,"ĠImportant":28511,"Shot":28512,"ĠRas":28513,"Ġtrainers":28514,"ĠBun":28515,"Working":28516,"Ġharmed":28517,"ĠPandora":28518,"ĠLTE":28519,"Ġmushroom":28520,"ĠCHAR":28521,"ĠFee":28522,"ĠMoy":28523,"Born":28524,"oliberal":28525,"ĠMartial":28526,"Ġgentlemen":28527,"Ġlingering":28528,"Official":28529,"Ġgraffiti":28530,"ĠNames":28531,"Der":28532,"Ġquint":28533,"istrate":28534,"azeera":28535,"ĠNOTICE":28536,"ĠFlorence":28537,"Ġpayable":28538,"Ġdepicts":28539,"ĠSpecies":28540,"Heart":28541,"âĶĢâĶĢâĶĢâĶĢâĶĢâĶĢâĶĢâĶĢ":28542,"Ġenclosed":28543,"Increases":28544,"Daily":28545,"ĠLis":28546,"Ġenactment":28547,"ĠBacon":28548,"ĠSteele":28549,"demand":28550,"Ġ183":28551,"Ġmouths":28552,"Ġstranded":28553,"Ġenhancement":28554,"011":28555,"ĠWhats":28556,"Ġhealed":28557,"eny":28558,"ĠRab":28559,"Ġ340":28560,"ĠLabyrinth":28561,"roach":28562,"ĠYosh":28563,"ĠClippers":28564,"Ġconcerts":28565,"Internet":28566,"355":28567,"Ġstickers":28568,"Ġtermed":28569,"ĠAxe":28570,"Ġgrandparents":28571,"France":28572,"ĠClim":28573,"ĠUh":28574,"ulic":28575,"Ġthrill":28576,"centric":28577,"ĠOverview":28578,"ĠConduct":28579,"Ġsubstantive":28580,"Ġ182":28581,"mur":28582,"Ġstray":28583,"ĠCoff":28584,"Ġrepetitive":28585,"ĠForgotten":28586,"Ġqualification":28587,"ewitness":28588,"ĠZimbabwe":28589,"Ġsimulated":28590,"ĠJD":28591,"253":28592,"ĠWare":28593,"Ġunsc":28594,"Times":28595,"Ġsummons":28596,"Ġdisconnected":28597,"Ġ184":28598,"cius":28599,"ĠGujar":28600,"odka":28601,"Ġerase":28602,"ĠTobacco":28603,"elected":28604,"Ġuncont":28605,"ĠShepard":28606,"ĠLamp":28607,"Ġalerted":28608,"Ġoperative":28609,"arna":28610,"uint":28611,"Ġnegligence":28612,"acements":28613,"Ġsupra":28614,"Ġprevail":28615,"ĠShark":28616,"Ġbelts":28617,"ãģ«":28618,"Ġtighter":28619,"Engineers":28620,"Ġinactive":28621,"Ġexponent":28622,"ĠWillie":28623,"aples":28624,"Ġheir":28625,"ĠHits":28626,"iann":28627,"ĠSays":28628,"Ġcurrents":28629,"ĠBengal":28630,"Ġarist":28631,"Buffer":28632,"Ġbreeze":28633,"ĠWesley":28634,"Cola":28635,"Ġpronoun":28636,"Ġdeed":28637,"ĠKling":28638,"Ġoft":28639,"Ġinflict":28640,"Ġpunishing":28641,"Ġnm":28642,"iku":28643,"ODUCT":28644,"014":28645,"Ġsubsidy":28646,"ĠDEA":28647,"ĠHerbert":28648,"ĠJal":28649,"Bank":28650,"Ġdeferred":28651,"Ġshipment":28652,"Bott":28653,"Ġalle":28654,"bearing":28655,"HTML":28656,"Offline":28657,"Ġ213":28658,"Ġscrolling":28659,"Ġscanned":28660,"ĠLibyan":28661,"ĠTOP":28662,"chrom":28663,"dt":28664,"column":28665,"PsyNetMessage":28666,"Zero":28667,"Ġtorso":28668,"050":28669,"âķIJ":28670,"Ġimperson":28671,"ĠSchwartz":28672,"udic":28673,"Ġpissed":28674,"ĠSapp":28675,"257":28676,"ĠISPs":28677,"ogl":28678,"Ġsupervised":28
679,"Ġadolescent":28680,"Ġattained":28681,"ĠDelivery":28682,"ĠBunny":28683,"Ġ1937":28684,"Ġminiature":28685,"Ġos":28686,"Ġ370":28687,"608":28688,"ĠMourinho":28689,"Ġinnate":28690,"Ġtempo":28691,"ĠNM":28692,"ĠFallen":28693,"009":28694,"Ġprovocative":28695,"Streamer":28696,"ĠBenedict":28697,"ĠBolshe":28698,"Ġturtle":28699,"ĠPCB":28700,"ĠEqual":28701,"Director":28702,"ĠRend":28703,"Ġfluids":28704,"Authorities":28705,"Ġcousins":28706,"requency":28707,"ĠNeighbor":28708,"sets":28709,"shared":28710,"Charles":28711,"password":28712,"Ġgears":28713,"Ġ211":28714,"ĠHardware":28715,"rika":28716,"Ġupstream":28717,"Hom":28718,"Ġdisproportionately":28719,"ivities":28720,"Ġundefined":28721,"Ġelectrons":28722,"Ġcommemor":28723,"Eventually":28724,"Ġ><":28725,"Ġirresponsible":28726,"218":28727,"ĠReleased":28728,"ĠOVER":28729,"ĠIGN":28730,"ĠBread":28731,"stellar":28732,"ĠSage":28733,"tted":28734,"damage":28735,"edition":28736,"ĠPrec":28737,"Ġlime":28738,"Ġconfinement":28739,"Ġcalorie":28740,"weapon":28741,"Ġdiffering":28742,"ĠSina":28743,"mys":28744,"amd":28745,"Ġintricate":28746,"kk":28747,"ĠPAT":28748,"ão":28749,"stones":28750,"links":28751,"Ġranch":28752,"Semitic":28753,"Ġdifferentiate":28754,"ĠSinger":28755,"occupied":28756,"Ġfortress":28757,"cmd":28758,"Ġinterception":28759,"ĠAnkara":28760,"Ġrept":28761,"ĠSolitaire":28762,"Ġremake":28763,"pred":28764,"Ġdared":28765,"autions":28766,"ĠBACK":28767,"Running":28768,"Ġdebugging":28769,"Ġgraphs":28770,"399":28771,"ĠNigel":28772,"Ġbun":28773,"Ġpillow":28774,"Ġprogressed":28775,"fashioned":28776,"Ġobedience":28777,"ERN":28778,"Ġrehears":28779,"Cell":28780,"tl":28781,"Sher":28782,"Ġherald":28783,"ĠPayment":28784,"ĠCory":28785,"ĠDept":28786,"Ġrepent":28787,"ĠWeak":28788,"uckland":28789,"Ġpleasing":28790,"Ġshortages":28791,"Ġjurors":28792,"ĠKab":28793,"qqa":28794,"Anti":28795,"Ġwow":28796,"ĠRCMP":28797,"Ġtsun":28798,"ĠSic":28799,"Ġcomprises":28800,"Ġspies":28801,"Ġprecinct":28802,"nu":28803,"Ġurges":28804,"Ġtimed":28805,"Ġstripes":28806,"ĠBoots":28807,"Ġyen":28808,"Advanced":28809,"Ġdiscrete":28810,"ĠArchangel":28811,"employment":28812,"Diff":28813,"Ġmonuments":28814,"Ġ209":28815,"worker":28816,"Ġ196":28817,"ĠIg":28818,"utterstock":28819,"TPS":28820,"Jac":28821,"Ġhomelessness":28822,"Ġcommentator":28823,"Ġracially":28824,"fing":28825,"seed":28826,"Ele":28827,"ellation":28828,"Ġethanol":28829,"Ġparish":28830,"ĠDong":28831,"ĠAwakening":28832,"Ġdeviation":28833,"ĠBearing":28834,"ĠTsuk":28835,"Ġrecess":28836,"Ġlymph":28837,"ĠCannabis":28838,"åľ":28839,"ĠNEWS":28840,"Ġdra":28841,"ĠStefan":28842,"ĠWrong":28843,"ĠSAM":28844,"Ġloosely":28845,"Ġinterpreter":28846,"ĠPlain":28847,"Government":28848,"Ġbigotry":28849,"Ġgrenades":28850,"avez":28851,"pictured":28852,"Ġmandated":28853,"ĠMonk":28854,"ĠPedro":28855,"Ġlava":28856,"274":28857,"Ġcynical":28858,"ĠScrolls":28859,"locks":28860,"Mp":28861,"Ġcongregation":28862,"ornings":28863,"phil":28864,"ĠIbid":28865,"Ġferv":28866,"Ġdisappearing":28867,"Ġarrogant":28868,"syn":28869,"ĠMaver":28870,"ĠSuit":28871,"241":28872,"Ġabbre":28873,"ackers":28874,"Pa":28875,"ĠYel":28876,"Whenever":28877,"Ġ235":28878,"ĠVine":28879,"ĠAnat":28880,"Ġextinct":28881,"LET":28882,"Ġexecutable":28883,"VERS":28884,"oxide":28885,"DNA":28886,"ĠPrel":28887,"Ġresentment":28888,"Ġcomprise":28889,"ĠAviv":28890,"Ġinterceptions":28891,"Ġprolific":28892,"INA":28893,"ĠErin":28894,"thought":28895,"219":28896,"ĠPsychiatry":28897,"unky":28898,"chemist":28899,"Ho":28900,"ĠMcCoy":28901,"Ġbricks":28902,"Los":28903,"rily":28904,"ĠUSSR":28905,"Ġrud":28906,"Ġlaud":28907,"ĠWise":28
908,"ĠEmerald":28909,"Ġrevived":28910,"Ġdamned":28911,"ĠRepair":28912,"idem":28913,"ctica":28914,"Ġpatriarch":28915,"ĠNurs":28916,"meg":28917,"Ġcheapest":28918,"reements":28919,"empty":28920,"ĠCelebr":28921,"Ġdeprivation":28922,"chanted":28923,"ĠThumbnails":28924,"Energy":28925,"ĠEthan":28926,"ĠQing":28927,"Ġopposes":28928,"WIND":28929,"vik":28930,"ĠMau":28931,"ĠSUB":28932,"667":28933,"GRE":28934,"ĠVolunte":28935,"nton":28936,"Cook":28937,"åIJ":28938,"esque":28939,"Ġplummet":28940,"Ġsuing":28941,"Ġpronounce":28942,"Ġresisting":28943,"ĠFishing":28944,"ĠTrials":28945,"Ġyell":28946,"Ġ310":28947,"Ġinduct":28948,"Ġpersonalized":28949,"often":28950,"Reb":28951,"EMBER":28952,"Ġviewpoint":28953,"Ġexistential":28954,"())":28955,"remove":28956,"MENTS":28957,"lasses":28958,"Ġevapor":28959,"Ġaisle":28960,"meta":28961,"Ġreflective":28962,"Ġentitlement":28963,"Ġdevised":28964,"music":28965,"ascade":28966,"Ġwinding":28967,"offset":28968,"Ġaccessibility":28969,"kered":28970,"Better":28971,"ĠJohnston":28972,"thinking":28973,"Snow":28974,"ĠCroatia":28975,"ĠAtomic":28976,"271":28977,"348":28978,"Ġtextbook":28979,"ĠSixth":28980,"ĠاÙĦ":28981,"Ġslider":28982,"ĠBurger":28983,"bol":28984,"Sync":28985,"Ġgrandchildren":28986,"Ġcerv":28987,"+)":28988,"Ġeternity":28989,"Ġtweeting":28990,"Ġspeculative":28991,"Ġpivotal":28992,"ĠWP":28993,"ĠTER":28994,"ynamic":28995,"Ġupl":28996,"ĠCats":28997,"perhaps":28998,"Ġclassmates":28999,"Ġblatant":29000,"'-":29001,"Ġlakh":29002,"antine":29003,"ĠBorg":29004,"iom":29005,"/(":29006,"ĠAthletic":29007,"Ġsar":29008,"OTA":29009,"ĠHoffman":29010,"Nevertheless":29011,"Ġadorable":29012,"Ġspawned":29013,"Associated":29014,"ĠDomestic":29015,"Ġimplant":29016,"ĠLuxem":29017,"ĠKens":29018,"Ġpumps":29019,"ĠSAT":29020,"Attributes":29021,"509":29022,"avour":29023,"Ġcentralized":29024,"ĠTN":29025,"Ġfreshly":29026,"ĠAchieve":29027,"Ġoutsiders":29028,"herty":29029,"ĠRee":29030,"ĠTowers":29031,"ĠDart":29032,"akable":29033,"Ġmp":29034,"ĠHeavenly":29035,"Ġripe":29036,"ĠCaroline":29037,"ryan":29038,"Ġclassics":29039,"Ġretiring":29040,"Ġ228":29041,"Ġah":29042,"Ġdealings":29043,"Ġpunching":29044,"ĠChapman":29045,"Options":29046,"maxwell":29047,"volume":29048,"Ġstal":29049,"Ġexported":29050,"ĠQuite":29051,"Ġnumerical":29052,"Burn":29053,"Fact":29054,"ĠKeystone":29055,"Ġtrending":29056,"Ġaltering":29057,"ĠAfricans":29058,"478":29059,"ĠMN":29060,"ĠKnock":29061,"Ġtemptation":29062,"Ġprestige":29063,"Overview":29064,"ĠTraditional":29065,"ĠBahrain":29066,"Private":29067,"ĠHOU":29068,"Ġbarr":29069,"ĠTat":29070,"Cube":29071,"USD":29072,"ĠGrande":29073,"ĠGat":29074,"ĠFlo":29075,"Ġresides":29076,"Ġindec":29077,"volent":29078,"Ġperpetual":29079,"ubes":29080,"Ġworldview":29081,"ĠQuantum":29082,"Ġfiltered":29083,"Ġensu":29084,"orgetown":29085,"ERSON":29086,"ĠMild":29087,"379":29088,"OTT":29089,"Ã¥":29090,"Ġvitamins":29091,"Ġribbon":29092,"Ġsincerely":29093,"ĠHin":29094,"Ġeighteen":29095,"Ġcontradictory":29096,"Ġglaring":29097,"Ġexpectancy":29098,"Ġconspir":29099,"Ġmonstrous":29100,"Ġ380":29101,"reci":29102,"Ġhandic":29103,"Ġpumped":29104,"Ġindicative":29105,"Ġrapp":29106,"Ġavail":29107,"ĠLEGO":29108,"ĠMarijuana":29109,"1985":29110,"erton":29111,"Ġtwentieth":29112,"################################":29113,"ĠSwamp":29114,"Ġvaluation":29115,"Ġaffiliates":29116,"adjusted":29117,"ĠFacility":29118,"262":29119,"Ġenzymes":29120,"itudinal":29121,"Ġimprint":29122,"Site":29123,"Ġinstaller":29124,"ĠTRA":29125,"mology":29126,"linear":29127,"ĠCollective":29128,"igating":29129,"ĠToken":29130,"Ġspeculated":29131,"KN":29132,"ĠCly":29133
,"ority":29134,"Ġdefer":29135,"Ġinspectors":29136,"approved":29137,"RM":29138,"ĠSuns":29139,"Ġinforming":29140,"ĠSyracuse":29141,"ibli":29142,"765":29143,"Ġglove":29144,"Ġauthorize":29145,"âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦":29146,"ĠCruise":29147,"Ġcontracting":29148,"shell":29149,"IFE":29150,"ĠJewel":29151,"pract":29152,"ĠPhotoshop":29153,"ĠKnowing":29154,"harm":29155,"Ġattractions":29156,"adan":29157,"etus":29158,"018":29159,"wagen":29160,"Alt":29161,"Ġmultiply":29162,"Ġequilibrium":29163,":{":29164,"ĠFighters":29165,"ĠEdgar":29166,"Ġfourteen":29167,"Govern":29168,"Ġmisuse":29169,"Ġabusing":29170,"Ġancestry":29171,"ramer":29172,"644":29173,"Ġworms":29174,"Ġthicker":29175,"ĠCombine":29176,"Ġpeasants":29177,"Ġvind":29178,"Ġconquest":29179,"Ġmocked":29180,"Ġcinnamon":29181,"ĠCald":29182,"ĠGallup":29183,"Ġavoidance":29184,"Ġincarnation":29185,"ĠStrat":29186,"Ġtasted":29187,"enta":29188,"ĠNeal":29189,"pared":29190,"Ġterminology":29191,"jection":29192,"Scientists":29193,"ĠINS":29194,"ĠDee":29195,"Ġdirectories":29196,"Road":29197,"ĠShap":29198,"bright":29199,"ĠDirectors":29200,"ĠColumn":29201,"Ġbob":29202,"Ġpreferably":29203,"Ġglitch":29204,"furt":29205,"Ġeg":29206,"idis":29207,"CBC":29208,"Ġsurrendered":29209,"Ġtestament":29210,"336":29211,"uggest":29212,"ĠNil":29213,"another":29214,"Ġpathetic":29215,"ĠDonna":29216,"Ġ218":29217,"ĠAvery":29218,"Ġwhiskey":29219,"Ġfixture":29220,"ĠConquest":29221,"Ġbets":29222,"Occ":29223,"ĠLeicester":29224,"].\"":29225,"Ġ));":29226,"Ġflashes":29227,"456":29228,"Ġmasked":29229,"gebra":29230,"Ġcomputed":29231,"chel":29232,"auder":29233,"Ġdefeats":29234,"ĠLiberation":29235,"ĠOsama":29236,"ĠVive":29237,"Changes":29238,"Channel":29239,"Ġtariffs":29240,"Ġmage":29241,"ĠSax":29242,"Ġinadvertently":29243,"ĠCRE":29244,"ĠReaper":29245,"inky":29246,"grading":29247,"Ġstereotyp":29248,"Ġcurl":29249,"ĠFANT":29250,"Ġframeworks":29251,"Mom":29252,"ĠAnch":29253,"Ġflavour":29254,"carbon":29255,"Ġpermitting":29256,"letcher":29257,"ĠMozilla":29258,"ĠParking":29259,"ĠChamp":29260,"Scroll":29261,"Ġmurderer":29262,"Ġrested":29263,"Ġowes":29264,"ĠPoss":29265,"ADD":29266,"IFF":29267,"resolution":29268,"ĠMining":29269,"Ġcomparative":29270,"Dim":29271,"Ġneighbouring":29272,"ĠAST":29273,"ĠToxic":29274,"Ġbiases":29275,"Ġgunfire":29276,"urous":29277,"ĠMoment":29278,"1983":29279,"Ġpervasive":29280,"ttp":29281,"ĠNormally":29282,"rir":29283,"Sarah":29284,"ĠAlbany":29285,"Ġunsett":29286,"ĠSMS":29287,"ipers":29288,"layer":29289,"ĠWhites":29290,"uple":29291,"Ġturbo":29292,"ĠLeeds":29293,"Ġthats":29294,"ĠMiner":29295,"MER":29296,"ĠReign":29297,"Ġperme":29298,"ĠBlitz":29299,"Ġ1934":29300,"Ġintimidating":29301,"tube":29302,"Ġeccentric":29303,"abolic":29304,"boxes":29305,"ĠAssociates":29306,"votes":29307,"Ġsimulate":29308,"umbo":29309,"astery":29310,"Ġshipments":29311,"FFFF":29312,"anth":29313,"Ġseasoned":29314,"Ġexperimentation":29315,"âĸł":29316,"laws":29317,"Meet":29318,"iddles":29319,"antics":29320,"Rating":29321,"ISIS":29322,"hift":29323,"Ġfronts":29324,"buf":29325,"017":29326,"Ġunatt":29327,"ĠDil":29328,"leases":29329,"ĠGardens":29330,"777":29331,"touch":29332,"vell":29333,"458":29334,"Ġ=====":29335,"saving":29336,"Ġerosion":29337,"ĠQuin":29338,"Ġearns":29339,"Ġaccomplishment":29340,"ĠWei":29341,"Ġ<[":29342,"_____":29343,"Ġirrig":29344,"ĠTeddy":29345,"Ġconquered":29346,"ĠArmored":29347,"Ġasserts":29348,"Ġmanipulating":29349,"ré":29350,"Ġtranscripts":29351,"Gallery":29352,"Ġplotting":29353,"Neil":29354,"Ġbetrayal":29355,"loader":29356,"ĠSul":29357,"Ġdisplacement":29358,"Ġroyalty":29359,"ĠWI":29360,"heit":293
61,"ĠDevices":29362,"allel":29363,"Ġmunicipalities":29364,"Ġcanal":29365,"Stars":29366,"ĠUAE":29367,"Ġ\"âĢ¦":29368,"ĠCU":29369,"above":29370,"Ġresonance":29371,"ĠguiActiveUn":29372,"added":29373,"ĠBraves":29374,"ĠIbn":29375,"Ġhereby":29376,"ĠBRE":29377,"Ġshareholder":29378,"ĠHir":29379,"ĠJi":29380,"Ġstrangely":29381,"Ġadmired":29382,"Ġplight":29383,"Ġbachelor":29384,"ĠPole":29385,"ciplinary":29386,"Tony":29387,"ĠArmenian":29388,"Ġunman":29389,"ĠZionist":29390,"Stage":29391,"iscover":29392,"Ġautomotive":29393,"Ġsidelines":29394,"Ġslick":29395,"ĠRenaissance":29396,"ĠFUN":29397,"Images":29398,"ĠHaj":29399,"Ġping":29400,"Ġshortcut":29401,"ĠBlvd":29402,"ĠLooks":29403,"Ġbursts":29404,"Ġclamp":29405,"Ġmish":29406,"Ġsorting":29407,"Ġpatriot":29408,"Ġcorrectness":29409,"ĠScandinav":29410,"ĠCavaliers":29411,"python":29412,"azar":29413,"Ġ375":29414,"ĠJaune":29415,"409":29416,"Ġdetrimental":29417,"Ġstabbing":29418,"Ġpoisoned":29419,"Ġfountain":29420,"ocent":29421,"orst":29422,"ĠMari":29423,"Ġrains":29424,"ĠOvers":29425,"ĠInstitution":29426,"udget":29427,"AMY":29428,"tale":29429,"ĠKR":29430,"ĠPrices":29431,"Ġheadaches":29432,"Ġlandsl":29433,"ĠAura":29434,"Bonus":29435,"ĠZhao":29436,"ĠHip":29437,"Ġhops":29438,"ĠKurdistan":29439,"Ġexploiting":29440,"ryn":29441,"Ġhypocrisy":29442,"opening":29443,"Ġgunshot":29444,"Ġwed":29445,"interstitial":29446,"Interstitial":29447,"Ġamen":29448,"Breaking":29449,"Ġmarketed":29450,"Wire":29451,"ĠCrowd":29452,"Continue":29453,"ĠKnown":29454,"ĠEffective":29455,"orean":29456,"izons":29457,"Joseph":29458,"Ġescalation":29459,"username":29460,"Ġcurtain":29461,"ATES":29462,"ĠPAR":29463,"ĠMiy":29464,"Ġcounterfe":29465,"lene":29466,"Ġcontenders":29467,"daily":29468,"ĠAsc":29469,"ĠPhillip":29470,"mostly":29471,"Ġfilename":29472,"hene":29473,"Ġresembling":29474,"Ġstaging":29475,"ĠChloe":29476,"Ġwiring":29477,"Hon":29478,"ĠRenew":29479,"ottage":29480,"ĠHybrid":29481,"much":29482,"Ġstrokes":29483,"Ġpolicymakers":29484,"APTER":29485,"ĠArkham":29486,"plot":29487,"Ġassistants":29488,"Ġdeport":29489,"ĠSega":29490,"Ġinfluenza":29491,"ĠCursed":29492,"ĠKobe":29493,"Ġskinny":29494,"Provider":29495,"ĠRip":29496,"Ġincremental":29497,"products":29498,"BF":29499,"Ġdome":29500,"ĠCredits":29501,"Ġlosers":29502,"ints":29503,"ĠBetty":29504,"ĠTalent":29505,"ĠDAM":29506,"Lv":29507,"Ess":29508,"Ġdens":29509,"temp":29510,"Judge":29511,"odic":29512,"Ġ'(":29513,"URES":29514,"etsk":29515,"VO":29516,"Ġretrieved":29517,"Ġarchitects":29518,"Ùĩ":29519,"Ġethic":29520,"ĠSecondary":29521,"stocks":29522,"adia":29523,"Ġ325":29524,"ĠOpinion":29525,"Ġsimultaneous":29526,"Ġdizz":29527,"ulp":29528,"Ġsmuggling":29529,"ippery":29530,"Random":29531,"facing":29532,"ĠDas":29533,"Ġstockp":29534,"Ġdisclosures":29535,"pointer":29536,"Ġcoral":29537,"ĠSelection":29538,"ĠPike":29539,"ivalent":29540,"Ġruthless":29541,"ĠRim":29542,"Ġensuing":29543,"ĠExperiment":29544,"Ġcongressman":29545,"Ġbeliever":29546,"Ġunspecified":29547,"ĠMord":29548,"Ġknowledgeable":29549,"ĠVERY":29550,"TX":29551,"Ġstraps":29552,"Ġturf":29553,"apeshifter":29554,"Ġmarital":29555,"Ġflock":29556,"ãģĨ":29557,"263":29558,"AMES":29559,"ĠOpposition":29560,"Ġtreasures":29561,"ĠGOD":29562,"Ġmodeled":29563,"ĠWORLD":29564,"Ġ([":29565,"ĠUsage":29566,"HF":29567,"Ġ$(":29568,"ussed":29569,"Ġpioneer":29570,"Eight":29571,"parse":29572,"bread":29573,"ritz":29574,"ĠMiranda":29575,"ĠKant":29576,"++)":29577,"oren":29578,"Ġprovoked":29579,"Ġbreeds":29580,"ĠIncludes":29581,"ĠPastebin":29582,"ĠFlip":29583,"Java":29584,"Ġbrink":29585,"Ġrumored":29586,"Ġunseen":29587,"Ġgarnered":29588,
"ĠDefin":29589,"alted":29590,"Ġtattoos":29591,"Ġhesitation":29592,"isitions":29593,"ĠWeaver":29594,"ĠReporting":29595,"Ġtherapies":29596,"Ġconsultants":29597,"Ġresidual":29598,"ĠMali":29599,"ĠRoma":29600,"iago":29601,"ĠResidents":29602,"ubi":29603,"Ġremedies":29604,"Ġadaptive":29605,"ĠAlive":29606,"ĠBarcl":29607,"Ġwallets":29608,"crypt":29609,"etermination":29610,"ĠPelosi":29611,"Ġslipping":29612,"otonin":29613,"Ġalliances":29614,"patrick":29615,"iris":29616,"Ġorth":29617,"ĠPerkins":29618,"ĠDeV":29619,"ĠGets":29620,"Ġdrying":29621,"gee":29622,"forest":29623,"ĠForget":29624,"orem":29625,"339":29626,"Ġvaguely":29627,"ĠDion":29628,"ĠPorn":29629,"ĠHOW":29630,"Ġpneum":29631,"Ġrubble":29632,"ĠTaste":29633,"encia":29634,"ĠGel":29635,"Ġdst":29636,"Ġ245":29637,"ĠMorocco":29638,"inflamm":29639,"ĠTwins":29640,"Ġbots":29641,"daughter":29642,"ĠBalk":29643,"Ġbrethren":29644,"Ġlogos":29645,"Ġgobl":29646,"fps":29647,"Ġsubdivision":29648,"Ġpawn":29649,"Ġsqueezed":29650,"Ġmorale":29651,"ĠDW":29652,"'\"":29653,"Ġknot":29654,"ooky":29655,"Ġdivisive":29656,"Ġboosted":29657,"chy":29658,"ãĥIJ":29659,"ifact":29660,"Ġnewcomers":29661,"ĠWrestling":29662,"Ġscouts":29663,"wolves":29664,"Rat":29665,"Ġnineteenth":29666,"ĠOsborne":29667,"Stats":29668,"Ġempowered":29669,"Ġpsychopath":29670,"ĠOEM":29671,"uggage":29672,"ĠPK":29673,"ĠMohammad":29674,"Pak":29675,"Ġanarchists":29676,"ĠExtract":29677,"esthes":29678,"ĠStockholm":29679,"loo":29680,"ĠGraph":29681,"Ġdeploying":29682,"ĠStranger":29683,"ĠMold":29684,"Ġstaffer":29685,"Ġdiscounted":29686,"uckle":29687,"please":29688,"ĠLanding":29689,"ÃŃa":29690,"Ġ193":29691,"Ġante":29692,"Ġrepetition":29693,"Ġ+/-":29694,"Ġparody":29695,"Ġlively":29696,"AAA":29697,"ĠHorus":29698,"Ġpits":29699,"inders":29700,"LOC":29701,"ĠVenice":29702,"406":29703,"ĠDiscover":29704,"âĨ":29705,"ellectual":29706,"Ġpens":29707,"Ġeyel":29708,"iguous":29709,"Impl":29710,"Ġjoking":29711,"Ġinval":29712,"ĠBelfast":29713,"Ġcreditors":29714,"ĠSkywalker":29715,"ovsky":29716,"Ġceasefire":29717,"Ġseals":29718,"isoft":29719,")).":29720,"ĠFelix":29721,"ITS":29722,"Ġtresp":29723,"ĠBlockchain":29724,"eware":29725,"ĠSchwar":29726,"enne":29727,"mounted":29728,"ĠBeacon":29729,"lesh":29730,"Ġimmensely":29731,"Ġcheering":29732,"Employ":29733,"scene":29734,"ishly":29735,"atchewan":29736,"ĠNicolas":29737,"Ġdrained":29738,"ĠExit":29739,"ĠAzerb":29740,"jun":29741,"Ġfloated":29742,"uania":29743,"Deep":29744,"Ġsuperv":29745,"Ġmystical":29746,"ĠDollar":29747,"ĠApostle":29748,"ĠREL":29749,"ĠProvided":29750,"ĠBucks":29751,"ãĥ´":29752,"cutting":29753,"Ġenhancements":29754,"ĠPenguins":29755,"ĠIsaiah":29756,"Ġjerk":29757,"ĠWyn":29758,"Ġstalled":29759,"Ġcryptocurrencies":29760,"ĠRoland":29761,"single":29762,"Ġlumin":29763,"ĠFellow":29764,"ĠCapacity":29765,"ĠKazakh":29766,"WN":29767,"Ġfinanced":29768,"389":29769,"Ġtid":29770,"Ġcollusion":29771,"ĠMyr":29772,"îĢ":29773,"Senator":29774,"Ġpediatric":29775,"Ġneatly":29776,"Ġsandwiches":29777,"ĠArchitecture":29778,"Ġtucked":29779,"Ġbalcony":29780,"Ġearthquakes":29781,"quire":29782,"Future":29783,"Ġhefty":29784,"éĹ":29785,"Ġspecializes":29786,"Ġstresses":29787,"Ġsender":29788,"Ġmisunderstanding":29789,"Ġepile":29790,"Ġprovoke":29791,"ĠColors":29792,"Ġdismay":29793,"uko":29794,"[_":29795,"586":29796,"neutral":29797,"Ġdonating":29798,"ĠRandall":29799,"Multi":29800,"Ġconveniently":29801,"ĠSung":29802,"ĠCoca":29803,"Ġtents":29804,"ĠAcceler":29805,"Ġpartnered":29806,"272":29807,"irming":29808,"ĠBAS":29809,"sometimes":29810,"Ġobjected":29811,"ubric":29812,"posed":29813,"LCS":29814,"grass":29815,"Ġattri
butable":29816,"VIS":29817,"Israeli":29818,"Ġrepeats":29819,"ĠRM":29820,"vag":29821,"uta":29822,"inous":29823,"Ġinert":29824,"ĠMiguel":29825,"æŃ":29826,"ĠHawaiian":29827,"Board":29828,"Ġartific":29829,"ĠAzerbai":29830,"asio":29831,"ĠRent":29832,"AIN":29833,"Ġappliances":29834,"Ġnationality":29835,"Ġasshole":29836,"ĠNeb":29837,"Ġnotch":29838,"hani":29839,"ĠBride":29840,"Availability":29841,"Ġintercepted":29842,"Ġcontinental":29843,"Ġswelling":29844,"ĠPerspect":29845,"bies":29846,".<":29847,"ithmetic":29848,"ĠLara":29849,"Ġtempting":29850,"addr":29851,"Ġoverseeing":29852,"clad":29853,"ĠDV":29854,"ĠGingrich":29855,"Ġmun":29856,"ĠAppropri":29857,"Ġalterations":29858,"ĠPatreon":29859,"Ġhavoc":29860,"Ġdisciplines":29861,"Ġnotoriously":29862,"akuya":29863,"ieri":29864,"?).":29865,"ĠWent":29866,"Ġsilicon":29867,"Ġtremb":29868,"Container":29869,"Known":29870,"Ġmortar":29871,"este":29872,"icka":29873,"Arthur":29874,"ĠPreviously":29875,"ĠMarty":29876,"Ġsparse":29877,"gins":29878,"Ġinward":29879,"ĠParticipant":29880,"Copy":29881,"ĠMisc":29882,"Ġantibiotic":29883,"ĠRetro":29884,"Ġelusive":29885,"Ġassail":29886,"ĠBattalion":29887,"ĠBought":29888,"Ġdiminish":29889,"ĠEuropa":29890,"session":29891,"ĠDangerous":29892,"iesel":29893,"Ġdisbelief":29894,"Ġblasts":29895,"extreme":29896,"ĠBoyd":29897,"ĠProjects":29898,"ĠGuys":29899,"Ġundergone":29900,"Ġgrill":29901,"ĠDwight":29902,"Ġ197":29903,"USER":29904,"Ġfilesystem":29905,"Ġclocks":29906,"Taylor":29907,"Ġwrapper":29908,"Ġfolding":29909,"ousand":29910,"ĠPhilippine":29911,"ATIONAL":29912,"ĠPerth":29913,"Ġashes":29914,"Ġaccumulate":29915,"ĠGateway":29916,"Shop":29917,"orkshire":29918,"Han":29919,"ĠBarrel":29920,"ĠLeh":29921,"ĠXV":29922,"Ġwhim":29923,"Ġrepo":29924,"ĠCG":29925,"ĠMam":29926,"Ġincorporating":29927,"Ġbailout":29928,"Ġlinguistic":29929,"Ġdisinteg":29930,"CLE":29931,"Ġcinematic":29932,"ĠFiber":29933,"Syn":29934,"ilion":29935,"ĠCompos":29936,"chens":29937,"Ġneoc":29938,"Ġboiled":29939,"FINE":29940,"ono":29941,"uncle":29942,"iken":29943,"ĠBM":29944,"ι":29945,"Ġreceipts":29946,"Ġdisposed":29947,"ĠThirty":29948,"ĠRough":29949,"ĠABS":29950,"Ġnotwithstanding":29951,"ollen":29952,"#$":29953,"Ġunreliable":29954,"Ġbloom":29955,"Ġmediocre":29956,"Ġtram":29957,"ĠTasman":29958,"Ġshakes":29959,"Ġmanifesto":29960,"ĠMW":29961,"Ġsatisfactory":29962,"Ġshores":29963,"Ġcomputation":29964,"Ġassertions":29965,"ormons":29966,"arag":29967,"abit":29968,"Democrats":29969,"ĠLoot":29970,"ĠVolks":29971,"haired":29972,"Ġgravitational":29973,"Sing":29974,"ĠMiz":29975,"Ġthrottle":29976,"Ġtyranny":29977,"ĠViews":29978,"Ġrobber":29979,"ĠMinority":29980,"Ġshrine":29981,"scope":29982,"purpose":29983,"Ġnucleus":29984,"ourcing":29985,"ĠUSDA":29986,"ĠDHS":29987,"wra":29988,"ĠBowie":29989,"Scale":29990,"ĠBEL":29991,"xi":29992,"Iter":29993,"Ġ(),":29994,"wright":29995,"Ġsailors":29996,"oused":29997,"NASA":29998,"ĠProof":29999,"ĠMineral":30000,"token":30001,"ĠFD":30002,"Rew":30003,"Ġell":30004,"630":30005,"Ġchancellor":30006,"ĠGos":30007,"Ġamounted":30008,"ĠRecre":30009,"omez":30010,"ĠOptim":30011,"ĠOlive":30012,"Ġtracker":30013,"owler":30014,"ĠUnique":30015,"Root":30016,"Ġmaritime":30017,"ĠQuran":30018,"ĠAdapt":30019,"Ġecosystems":30020,"ĠRepeat":30021,"ĠSoy":30022,"ĠIMP":30023,"Ġgraduating":30024,"andem":30025,"Pur":30026,"ĠReset":30027,"ĠTrick":30028,"ĠPhilly":30029,"ĠTue":30030,"ĠMalaysian":30031,"Ġclimax":30032,"Ġbury":30033,"Ġconspic":30034,"ĠSouthampton":30035,"ĠFlowers":30036,"Ġescorted":30037,"ĠEducational":30038,"ĠIRC":30039,"Ġbrutally":30040,"eating":30041,"Ġpillar":30042,"ĠSang":300
43,"ĠJude":30044,"arling":30045,"ĠAmnesty":30046,"Ġreminding":30047,"ĠAdministrative":30048,"hesda":30049,"Ġflashed":30050,"ĠPBS":30051,"perate":30052,"feature":30053,"Ġswipe":30054,"Ġgraves":30055,"oultry":30056,"261":30057,"breaks":30058,"ĠGuer":30059,"Ġshrimp":30060,"ĠVoting":30061,"quist":30062,"Ġanalytical":30063,"Ġtablespoons":30064,"ĠSOU":30065,"Ġresearched":30066,"Ġdisrupted":30067,"Ġjour":30068,"Ġreplica":30069,"Ġcartoons":30070,"bians":30071,"})":30072,"copy":30073,"Got":30074,"ouched":30075,"PUT":30076,"Ġswarm":30077,"notations":30078,"said":30079,"Ġrebuilt":30080,"Ġcollaborate":30081,"Ġraging":30082,"Ġnar":30083,"Ġdemographics":30084,"ĠDDR":30085,"Ġdistrust":30086,"ossier":30087,"ĠKro":30088,"Ġpumpkin":30089,"Ġregrets":30090,"Ġfatalities":30091,"ĠLens":30092,"ĠOle":30093,"pd":30094,"Ġpuppet":30095,"ĠOutlook":30096,"ĠStam":30097,"Ol":30098,"Fair":30099,"UU":30100,"Ġrewritten":30101,"ı":30102,"Ġfascinated":30103,"Ġvectors":30104,"Ġtribunal":30105,"uay":30106,"ĠMats":30107,"ĠCoins":30108,"[[":30109,"Ġ181":30110,"Ġrenders":30111,"ĠKaepernick":30112,"Ġespionage":30113,"Ġsumm":30114,"Ġditch":30115,"Account":30116,"Ġspreadsheet":30117,"Ġmutant":30118,"past":30119,"407":30120,"Ġdye":30121,"Ġinitiation":30122,"Ġ4000":30123,"Ġpunishable":30124,"Ġthinner":30125,"ĠKhal":30126,"Ġintermedi":30127,"Dun":30128,"ĠGotham":30129,"Ġeagerly":30130,"Ġvaginal":30131,"powers":30132,"VW":30133,"ĠWATCHED":30134,"Ġpredator":30135,"amsung":30136,"Ġdisparity":30137,"Ġ[*":30138,"Ġamph":30139,"Ġoutskirts":30140,"ĠSpirits":30141,"Ġskeletal":30142,"л":30143,"ĠRear":30144,"Ġissuance":30145,"ĠLogic":30146,"released":30147,"ZZ":30148,"ĠBound":30149,"Entry":30150,"Ġexits":30151,"isol":30152,"ĠFounder":30153,"Ġwre":30154,"ĠGreenland":30155,"ĠMMO":30156,"taker":30157,"INC":30158,"ãģ¾":30159,"Ġhourly":30160,"henko":30161,"Ġfantasies":30162,"Ġdisob":30163,"Ġdemolition":30164,"ãĥĭ":30165,"Ġenlisted":30166,"ratulations":30167,"Ġmisguided":30168,"Ġensured":30169,"Ġdiscouraged":30170,"mort":30171,"Ġflank":30172,"Ġcess":30173,"Ġreacts":30174,"ĠSere":30175,"sensitive":30176,"ĠSerpent":30177,"assad":30178,"Ġ247":30179,"Ġcalmly":30180,"busters":30181,"Ġbleed":30182,"ĠStro":30183,"Ġamusement":30184,"ĠAntarctica":30185,"Ġscept":30186,"ĠGaw":30187,"aq":30188,"asonic":30189,"Ġsprawling":30190,"native":30191,"aturated":30192,"ĠBattlefield":30193,"IVERS":30194,"EB":30195,"ĠGems":30196,"ĠNorthwestern":30197,"ĠFilms":30198,"ĠAutomatic":30199,"Ġapprehend":30200,"ãģ¨":30201,"ĠguiName":30202,"Ġbackend":30203,"Ġevidenced":30204,"geant":30205,"012":30206,"ĠSiege":30207,"ĠexternalTo":30208,"ĠunfocusedRange":30209,"ĠguiActiveUnfocused":30210,"ĠguiIcon":30211,"ĠexternalToEVA":30212,"ĠexternalToEVAOnly":30213,"Fri":30214,"chard":30215,"enaries":30216,"Ġchiefs":30217,"Ġcf":30218,"ĠHUD":30219,"Ġcorrobor":30220,"ĠdB":30221,"ĠTaken":30222,"ĠPatricia":30223,"rail":30224,"ĠCharm":30225,"ĠLibertarian":30226,"rieve":30227,"Personal":30228,"ĠOUR":30229,"geries":30230,"Ġdumping":30231,"Ġneurological":30232,"itimate":30233,"ĠClintons":30234,"rafted":30235,"ĠMolly":30236,"Ġterminals":30237,"register":30238,"Ġflare":30239,"Ġencoded":30240,"Ġautopsy":30241,"pel":30242,"machine":30243,"Ġexemptions":30244,"ĠRoyals":30245,"distance":30246,"Ġdrafts":30247,"Ġlame":30248,"ĠCunning":30249,"Ġspouses":30250,"ĠMarkets":30251,"ĠCarrier":30252,"Ġimplying":30253,"ĠYak":30254,"sid":30255,"Ġloser":30256,"Ġvigilant":30257,"Ġimpeachment":30258,"Ġaugmented":30259,"ĠEmployees":30260,"Ġunintended":30261,"ternally":30262,"ĠWatt":30263,"Ġrecognizable":30264,"essim":30265,"æĿ":30
266,"Ġcoated":30267,"rha":30268,"Ġlieutenant":30269,"ĠLegislation":30270,"published":30271,"444":30272,"013":30273,"Ġideally":30274,"ĠPassword":30275,"Ġsimplify":30276,"ĠMeta":30277,"ĠMRI":30278,"Ġpleading":30279,"organized":30280,"handler":30281,"Ġunravel":30282,"correct":30283,"Ġicy":30284,"Ġparanoid":30285,"Ġpasser":30286,"Ġinspections":30287,"ofer":30288,"ĠHealthcare":30289,"283":30290,"ĠBrut":30291,"iola":30292,"forge":30293,"ĠMedieval":30294,"MSN":30295,"ievers":30296,"ĠProgramming":30297,"åī":30298,"Ġ223":30299,"mu":30300,"ĠCLE":30301,"uga":30302,"Ġshoppers":30303,"Ġinformative":30304,"ĠPlans":30305,"Ġsupplementation":30306,"ĠTests":30307,"tyard":30308,"ocytes":30309,"ĠVega":30310,"ĠGujarat":30311,"ermanent":30312,"Except":30313,"ĠLOT":30314,"alla":30315,"ĠCumm":30316,"ĠOsw":30317,"Ġvenom":30318,"ĠDebt":30319,"ĠDOWN":30320,"Ġreunion":30321,"Ġmuc":30322,"ĠRelief":30323,"Ġgeop":30324,"ĠðŁĺ":30325,"alogue":30326,"Anth":30327,"echo":30328,"Ġcorros":30329,"Ġreplication":30330,"ĠBlazing":30331,"ĠDaughter":30332,"Ġinflic":30333,"ĠLindsey":30334,"ÙĪ":30335,"284":30336,"Exit":30337,"Ġgloom":30338,"TAIN":30339,"Ġundermining":30340,"Ġadvising":30341,"hidden":30342,"Ġoverflow":30343,"Ġgor":30344,"urdue":30345,"Ġechoes":30346,"enhagen":30347,"Ġimpuls":30348,"drug":30349,"cash":30350,"Ġasync":30351,"Ġmirac":30352,"atts":30353,"punk":30354,"Ġpivot":30355,"ĠLegislative":30356,"Ġbloggers":30357,"ĠClaw":30358,"sburg":30359,"dyl":30360,"ĠRecommend":30361,"Ġverte":30362,"Ġprohibiting":30363,"ĠPanther":30364,"Jonathan":30365,"Ġomin":30366,"Ġhateful":30367,"281":30368,"ĠOrche":30369,"ĠMurdoch":30370,"downs":30371,"Ġasymm":30372,"GER":30373,"Always":30374,"Ġinforms":30375,"ĠWM":30376,"ĠPony":30377,"ĠAppendix":30378,"ĠArlington":30379,"Jam":30380,"Ġmedicinal":30381,"ĠSlam":30382,"ITIES":30383,"Ġreaff":30384,"ĠRi":30385,"FG":30386,"Spring":30387,"bool":30388,"Ġthighs":30389,"Ġmarkings":30390,"ĠRaqqa":30391,"ĠLak":30392,"poll":30393,"tsky":30394,"ĠMorty":30395,"ĠDefinition":30396,"Ġdebunk":30397,"endered":30398,"ĠLeone":30399,"avers":30400,"Ġmortgages":30401,"Apparently":30402,"Nic":30403,"haus":30404,"ĠThousands":30405,"auld":30406,"Ġmash":30407,"shoot":30408,"Ġdiarr":30409,"Ġconsciously":30410,"Hero":30411,"eas":30412,"ĠNaturally":30413,"ĠDestroyer":30414,"Ġdashboard":30415,"services":30416,"Rog":30417,"Ġmillennials":30418,"Ġinvade":30419,"-(":30420,"Ġcommissions":30421,"ĠAuckland":30422,"Ġbroadcasts":30423,"Ġfrontal":30424,"Ġcrank":30425,"ĠHistoric":30426,"Ġrumours":30427,"CTV":30428,"Ġsteril":30429,"Ġbooster":30430,"rocket":30431,"ãĤ¼":30432,"utsche":30433,"ĠPI":30434,"Ġ233":30435,"ĠProducer":30436,"ĠAnalytics":30437,"Ġinvaluable":30438,"Ġunintention":30439,"ĠCY":30440,"Ġscrutin":30441,"Ġgigg":30442,"Ġengulf":30443,"Ġproletariat":30444,"Ġhacks":30445,"ĠHew":30446,"arak":30447,"ĠSlime":30448,"ielding":30449,"agher":30450,"ĠElliot":30451,"Ġtelecom":30452,"Ġ219":30453,"ultan":30454,"ĠArbor":30455,"ĠScouts":30456,"Ban":30457,"Ġlifespan":30458,"Ġblasp":30459,"388":30460,"Ġjudiciary":30461,"ĠContinental":30462,"asking":30463,"McC":30464,"LED":30465,"Ġbaggage":30466,"ĠSorcerer":30467,"Ġremnants":30468,"ĠGriffith":30469,"etsu":30470,"ĠSubaru":30471,"ĠPersonality":30472,"designed":30473,"ushima":30474,"agnar":30475,"Ġrecoil":30476,"Ġpassions":30477,"\\\":":30478,"Ġtee":30479,"Ġabolition":30480,"ĠCreating":30481,"jac":30482,"Ġ194":30483,"019":30484,"Ġpillars":30485,"riched":30486,"/\"":30487,"tk":30488,"Ġlivelihood":30489,"Ġroasted":30490,"ahon":30491,"ĠHutch":30492,"assert":30493,"Ġdividend":30494,"Ġknit":30495,"Ġ
daunting":30496,"Ġdisturbance":30497,"Ġshale":30498,"Ġcultivated":30499,"Ġrefrigerator":30500,"LB":30501,"ĠNET":30502,"Ġcommercials":30503,"Ġthinkers":30504,"455":30505,"Ġchop":30506,"Broad":30507,"Ġsuspicions":30508,"Ġtagged":30509,"lifting":30510,"Ġstylish":30511,"ĠShields":30512,"Shortly":30513,"Ġtails":30514,"Auth":30515,"STE":30516,"ĠGAME":30517,"Ġseism":30518,"ĠKis":30519,"ologne":30520,"Ġcowork":30521,"Ġforcibly":30522,"Ġthyroid":30523,"ĠPB":30524,"ANE":30525,"married":30526,"horse":30527,"Ġpolymer":30528,"ĠChal":30529,"odor":30530,"DEBUG":30531,"ĠContext":30532,"Ġbliss":30533,"Ġpinpoint":30534,"ĠMathemat":30535,"legram":30536,"ĠWeekend":30537,"Ġlabelled":30538,"Ġbart":30539,"itles":30540,"Ġestrogen":30541,"âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ":30542,"\"'":30543,"Ġvisibly":30544,"Ġoutsider":30545,"aida":30546,"Area":30547,"Ġdissemin":30548,"Ġdishonest":30549,"ĠClosed":30550,"ĠBulletin":30551,"ĠRamsey":30552,"sword":30553,"ĠXI":30554,"ourced":30555,"Same":30556,"346":30557,"ĠRepe":30558,"ĠKou":30559,"cake":30560,"emis":30561,"Cache":30562,"ĠMeaning":30563,"ĠEnlight":30564,"onomy":30565,"Ġmanifestation":30566,"sworth":30567,"Jay":30568,"Ġchore":30569,"ör":30570,"Dream":30571,"Ġsanctioned":30572,"Ġculturally":30573,"ĠAra":30574,"Nav":30575,"Ġtheological":30576,"Ġstrut":30577,"ĠVO":30578,"ĠHandbook":30579,"Ġconstructing":30580,"Ġ¶":30581,"ĠBenefits":30582,"ĠPsychological":30583,"sac":30584,"å¸":30585,"policy":30586,"ĠMatters":30587,"ĠReported":30588,"ĠByte":30589,"Ġvitro":30590,"ĠMaiden":30591,"Ġlam":30592,"ĠJennings":30593,"Ġgarment":30594,"ĠRutgers":30595,"ĠStafford":30596,"ĠWellington":30597,"Ġintermitt":30598,"Ġnpm":30599,"Ġordeal":30600,"Ġplugged":30601,"ooming":30602,"inished":30603,"framework":30604,"Ġtimber":30605,"Ġcass":30606,"Ġ850":30607,"iless":30608,"ĠRedux":30609,"768":30610,"Stre":30611,"Ġsurpassed":30612,"whel":30613,"Ġparallels":30614,"Ġveil":30615,"ĠGI":30616,"ĠREST":30617,"Ġreadiness":30618,"sort":30619,"Ġmodifying":30620,"ĠSlate":30621,"ruff":30622,"Ġmarble":30623,"Ġinfrared":30624,"Ġauditor":30625,"ĠFANTASY":30626,"ĠPoverty":30627,"ĠSPD":30628,"Ġ\"(":30629,"Ky":30630,"RAY":30631,"Ġexecutions":30632,"ĠBeverly":30633,"ĠMarxism":30634,"ĠBurst":30635,"ĠKali":30636,"estones":30637,"Clearly":30638,"Ell":30639,"ãģ§":30640,"ĠProceedings":30641,"Token":30642,"IFIC":30643,"ña":30644,"Central":30645,"ĠHaley":30646,"ĠDrama":30647,"Ġformations":30648,"ORN":30649,"Books":30650,"Ġdominating":30651,"ĠFlyers":30652,"ĠCompanion":30653,"Ġdisciplined":30654,"ĠYugoslav":30655,"ĠSpells":30656,"Ġvengeance":30657,"Ġlandlords":30658,"Len":30659,"ĠOgre":30660,"anoia":30661,"Ġpiercing":30662,"Ġcongreg":30663,"Ġscorer":30664,"obia":30665,"Ġnickel":30666,"ĠLearns":30667,"Ġrejo":30668,"Ġmasterpiece":30669,"Flash":30670,"Ġinhabited":30671,"ĠOpenGL":30672,"ĠDud":30673,"ĠICO":30674,"Ġarter":30675,"Ġplur":30676,"Ġmastery":30677,"Ġlongstanding":30678,"sted":30679,"Ġwines":30680,"Ġtelevised":30681,"ĠShrine":30682,"ĠBayern":30683,"Ġâĵĺ":30684,"Ġenclosure":30685,"john":30686,"Ġprophets":30687,"ĠResurrection":30688,"ĠOrders":30689,"Ġuneven":30690,"rals":30691,"Ġdwind":30692,"ĠLah":30693,"ĠSloven":30694,"378":30695,"Ġinsistence":30696,"affle":30697,"ĠClone":30698,"Ġhardship":30699,"ĠCongressman":30700,"Ġplead":30701,"Ġreviewers":30702,"Ġcured":30703,"Ġ1935":30704,"asley":30705,"fake":30706,"ĠThinking":30707,"ydia":30708,"PART":30709,"ĠDota":30710,"oit":30711,"Ġwhipped":30712,"Ġbouncing":30713,"ĠHispanics":30714,"comings":30715,"Ġcannabin":30716,"ĠChambers":30717,"ĠZack":30718,"Optional":30719,"
Ġcoats":30720,"Ġprowess":30721,"ĠNorton":30722,"Ġplainly":30723,"Ġfreight":30724,"Ġinhibition":30725,"Ġclam":30726,"Ġ303":30727,"kef":30728,"aleigh":30729,"Luke":30730,"Ġpsycho":30731,"atorium":30732,"MED":30733,"Ġtreaties":30734,"Ġindisc":30735,"Ġdc":30736,"OPS":30737,"Ġresilient":30738,"ĠInterstate":30739,"Ġslack":30740,"Ġmundane":30741,"Ġestablishes":30742,"359":30743,"Ġstrained":30744,"Ġnond":30745,"Sus":30746,"Ġcaste":30747,"arate":30748,"ieving":30749,"Ġunfairly":30750,"Ġparser":30751,"onial":30752,"ursive":30753,"Via":30754,"ĠOtto":30755,"ĠAuthorities":30756,"stroke":30757,"KR":30758,"ĠMercy":30759,"Ġfurnished":30760,"Ġoutset":30761,"Ġmetic":30762,"1982":30763,"olithic":30764,"ĠTent":30765,"ogical":30766,"ĠAircraft":30767,"Ġhides":30768,"ĠBecame":30769,"Ġeducators":30770,"reaching":30771,"Ġvolatility":30772,"Ġtoddler":30773,"ĠNASCAR":30774,"ĠTwelve":30775,"ĠHighlights":30776,"Ġgrape":30777,"Ġsplits":30778,"Ġpeasant":30779,"Ġreneg":30780,"ĠMSI":30781,"Temp":30782,"stars":30783,"Ġtrek":30784,"ĠHyde":30785,"binding":30786,"Ġrealism":30787,"Ġoxide":30788,"ĠHos":30789,"Ġmounts":30790,"Ġbiting":30791,"Ġcollapsing":30792,"Ġpostal":30793,"Ġmuseums":30794,"Ġdetached":30795,"Ġrespecting":30796,"Ġmonopol":30797,"Ġworkflow":30798,"ĠCake":30799,"Template":30800,"ĠOrganisation":30801,"Ġpersistence":30802,"369":30803,"Coming":30804,"Brad":30805,"Ġredundant":30806,"ĠGTA":30807,"Ġbending":30808,"Ġrevoked":30809,"Ġoffending":30810,"Ġframing":30811,"Ġprintf":30812,"Commun":30813,"members":30814,"Outside":30815,"Ġconstrued":30816,"Ġcoded":30817,"FORE":30818,"Ġchast":30819,"Chat":30820,"Indian":30821,"ĠYard":30822,"?!\"":30823,"ĠPorts":30824,"ĠXavier":30825,"ĠRET":30826,"'.\"":30827,"ĠBoat":30828,"ivated":30829,"icht":30830,"umerable":30831,"Ds":30832,"ĠDunn":30833,"Ġcoffin":30834,"Ġsecurely":30835,"ĠRaptors":30836,"ĠBes":30837,"Installation":30838,"Ġinception":30839,"ĠHealthy":30840,"endants":30841,"Ġpsychologists":30842,"ĠSheikh":30843,"cultural":30844,"ĠBlackBerry":30845,"shift":30846,"Fred":30847,"oche":30848,"Ġcakes":30849,"ĠSEO":30850,"ĠGian":30851,"ĠAsians":30852,"ogging":30853,"element":30854,"Ġpundits":30855,"ĠVaugh":30856,"ĠGavin":30857,"Ġhitter":30858,"Ġdrowned":30859,"Ġchalk":30860,"ĠZika":30861,"Ġmeasles":30862,"802":30863,"âĢ¦..":30864,"ĠAWS":30865,"]\"":30866,"Ġdistort":30867,"ĠMast":30868,"Ġantibodies":30869,"ĠMash":30870,"Memory":30871,"ĠUganda":30872,"ĠProb":30873,"Ġvomiting":30874,"ĠTurns":30875,"Ġoccupying":30876,"Ġevasion":30877,"ĠTherapy":30878,"Ġpromo":30879,"Ġelectr":30880,"Ġblueprint":30881,"ĠDre":30882,"priced":30883,"ĠDepot":30884,"Ġalleviate":30885,"ĠSomali":30886,"marg":30887,"nine":30888,"Ġnostalgia":30889,"ĠShepherd":30890,"Ġcavalry":30891,"Ġtorped":30892,"ĠBloody":30893,"xb":30894,"Ġsank":30895,"Ġgoalt":30896,"reportprint":30897,"embedreportprint":30898,"cloneembedreportprint":30899,"ĠInitially":30900,"ĠFischer":30901,"Ġnoteworthy":30902,"cern":30903,"Ġinefficient":30904,"rawdownload":30905,"rawdownloadcloneembedreportprint":30906,"cation":30907,"ĠDynasty":30908,"lag":30909,"DES":30910,"Ġdistinctly":30911,"ĠEstonia":30912,"Ġopenness":30913,"Ġgossip":30914,"ruck":30915,"Width":30916,"ĠIbrahim":30917,"Ġpetroleum":30918,"Ġavatar":30919,"ĠHed":30920,"atha":30921,"ĠHogwarts":30922,"Ġcaves":30923,"678":30924,"Ġsafeguard":30925,"ĠMog":30926,"isson":30927,"ĠDurham":30928,"slaught":30929,"ĠGraduate":30930,"Ġsubconscious":30931,"ĠExcellent":30932,"ĠDum":30933,"-----":30934,"Ġpiles":30935,"ĠWORK":30936,"ĠGarn":30937,"ĠFol":30938,"ĠATM":30939,"Ġavoids":30940,"ĠTul":30941,"Ġbleak":3094
2,"ELY":30943,"ivist":30944,"lightly":30945,"Pers":30946,"ĠDob":30947,"ĠLS":30948,"Ġinsanity":30949,"ε":30950,"atalie":30951,"Enlarge":30952,"Ġtwists":30953,"Ġfaulty":30954,"Ġpiracy":30955,"Ġimpover":30956,"Ġrugged":30957,"ĠFashion":30958,"Ġsands":30959,"'?":30960,"swick":30961,"Ġnatives":30962,"Ġhen":30963,"ĠNoise":30964,"ãĥĹ":30965,"Ġgreens":30966,"Ġfreezer":30967,"Ġdynasty":30968,"ĠFathers":30969,"ĠNewark":30970,"Ġarchaeological":30971,"Ġot":30972,"obar":30973,"Ġblockade":30974,"Ġallerg":30975,"LV":30976,"Ġdebit":30977,"ĠRFC":30978,"ĠMilton":30979,"ĠPressure":30980,"Ġwillingly":30981,"Ġdisproportionate":30982,"Ġoppressive":30983,"Ġdiamonds":30984,"Ġbelongings":30985,"1970":30986,"Ġbells":30987,"Ġimperialism":30988,"Ġ227":30989,"Ġexploding":30990,"ĠEclipse":30991,"Ġ1919":30992,"Ġrant":30993,"Ġnominations":30994,"347":30995,"Ġpeacefully":30996,"rica":30997,"ĠFUCK":30998,"Ġvibration":30999,"malink":31000,"Ġropes":31001,"ĠIvanka":31002,"ĠBrewery":31003,"ĠBooker":31004,"ĠOwens":31005,"goers":31006,"Services":31007,"ĠSnape":31008,"Ġ191":31009,"395":31010,"Ġ299":31011,"justice":31012,"Ġbri":31013,"Ġdiscs":31014,"Ġprominently":31015,"Ġvulgar":31016,"Ġskipping":31017,"lves":31018,"Ġtsunami":31019,"374":31020,"ĠUrug":31021,"ĠEid":31022,"recated":31023,"phen":31024,"Ġfaults":31025,"ĠStarted":31026,"950":31027,"Ġpi":31028,"Ġdetector":31029,"Ġbastard":31030,"Ġvalidated":31031,"SpaceEngineers":31032,"OURCE":31033,"Ġ(~":31034,"Ġunsur":31035,"Ġaffirmed":31036,"Ġfascism":31037,"Ġresolving":31038,"ĠChavez":31039,"ĠCyn":31040,"Ġdetract":31041,"Lost":31042,"Ġrigged":31043,"Ġhomage":31044,"ĠBruno":31045,"555":31046,"eca":31047,"Ġpresses":31048,"Ġhumour":31049,"Ġspacing":31050,"Ġ'/":31051,"olkien":31052,"Coun":31053,"OPER":31054,"Tre":31055,"Son":31056,"ĠCambodia":31057,"ierre":31058,"mong":31059,"ozy":31060,"Ġliquidity":31061,"ĠSoviets":31062,"ĠFernando":31063,"Ġ229":31064,"Ġslug":31065,"ĠCatalan":31066,"electric":31067,"Ġscenery":31068,"ĠHearth":31069,"Ġconstrained":31070,"Ġgoalie":31071,"ĠGuidelines":31072,"ĠAmmo":31073,"ĠPearson":31074,"Ġtaxed":31075,"Ġfetus":31076,"Response":31077,"ĠAlexis":31078,"thia":31079,"Guy":31080,"Ġreconstruct":31081,"Ġextremes":31082,"Ġconcluding":31083,"ĠPeg":31084,"ooks":31085,"Ġdeductions":31086,"Rose":31087,"Ġgroundbreaking":31088,"ĠTarg":31089,"ãĥģ":31090,"ĠReve":31091,"resource":31092,"Ġmoons":31093,"Ġelectromagnetic":31094,"Ġamidst":31095,"ĠViktor":31096,"NESS":31097,"BACK":31098,"Ġcommute":31099,"ĠAnaheim":31100,"Ġfluctuations":31101,"640":31102,"Ġnoodles":31103,"ĠCopenhagen":31104,"ĠTide":31105,"ĠGrizz":31106,"ĠSEE":31107,"Ġpipelines":31108,"Ġscars":31109,"endo":31110,"agus":31111,"ĠETF":31112,"/#":31113,"ĠBecome":31114,"448":31115,"Ġvisc":31116,"ĠRecommended":31117,"Ġjumper":31118,"Ġcognition":31119,"Ġassassin":31120,"Ġwitnessing":31121,"ĠSetup":31122,"Ġlac":31123,"vim":31124,"ISM":31125,"pages":31126,"SSL":31127,"358":31128,"Ġadject":31129,"industrial":31130,"lore":31131,"chery":31132,"Ġglitter":31133,"Ġcalf":31134,"Florida":31135,"Ġspoilers":31136,"Ġsucceeds":31137,"Ġchanting":31138,"Ġslogans":31139,"ĠTracy":31140,"Visit":31141,"rology":31142,"Ġmornings":31143,"Ġlineage":31144,"Ġsip":31145,"Ġintensely":31146,"Ġflourish":31147,"ĠSleeping":31148,"ĠFem":31149,"orpor":31150,"ĠKlan":31151,"ĠDarth":31152,"hack":31153,"ĠNielsen":31154,"Ġtumors":31155,"Ġprocurement":31156,"ĠYorkshire":31157,"Ġraided":31158,"KY":31159,"Anna":31160,"Ġ//[":31161,"ĠDisorder":31162,"ĠMustang":31163,"ĠWen":31164,"ĠTrying":31165,"sq":31166,"Ġdeliveries":31167,"Ġshutter":31168,"Ġcerebral":31169,"Ġbi
polar":31170,"ĠCN":31171,"lass":31172,"jet":31173,"Ġdebating":31174,">:":31175,"Ġeagle":31176,"grades":31177,"ĠDixon":31178,"UGC":31179,"MAS":31180,"ĠDraco":31181,"ĠMachines":31182,"affer":31183,"Ġeman":31184,"²":31185,"pron":31186,"ĠGym":31187,"Ġcomparatively":31188,"ĠTribunal":31189,"PRO":31190,"Ġlex":31191,"Ġfertile":31192,"Ġdepressing":31193,"Ġsuperficial":31194,"essential":31195,"ĠHunters":31196,"gp":31197,"Ġprominence":31198,"Liber":31199,"ĠAncest":31200,"otechnology":31201,"Ġmocking":31202,"ĠTraff":31203,"ĸļ":31204,"Medium":31205,"Iraq":31206,"Ġpsychiatrist":31207,"Quantity":31208,"ĠLect":31209,"Ġnoisy":31210,"520":31211,"GY":31212,"Ġslapped":31213,"ĠMTV":31214,"Ġpara":31215,"pull":31216,"Multiple":31217,"asher":31218,"Ġnour":31219,"ĠSeg":31220,"Spell":31221,"vous":31222,"ordial":31223,"Senior":31224,"ĠGoldberg":31225,"ĠPlasma":31226,"need":31227,"Ġmessenger":31228,"eret":31229,"Ġteamed":31230,"Ġliteracy":31231,"ĠLeah":31232,"ĠDoyle":31233,"Ġemitted":31234,"UX":31235,"Ġevade":31236,"Ġmaze":31237,"Ġwrongly":31238,"ĠLars":31239,"Ġstereotype":31240,"Ġpledges":31241,"Ġaroma":31242,"ĠMET":31243,"Ġacre":31244,"ĠOD":31245,"Ġff":31246,"Ġbreweries":31247,"ĠHilton":31248,"undle":31249,"ĠKak":31250,"ĠThankfully":31251,"ĠCanucks":31252,"inctions":31253,"ĠAppears":31254,"Ġcoer":31255,"Ġundermined":31256,"rovers":31257,"Andre":31258,"Ġblaze":31259,"umers":31260,"Ġfamine":31261,"amphetamine":31262,"ulkan":31263,"Amount":31264,"Ġdesperation":31265,"wikipedia":31266,"development":31267,"ĠCorinth":31268,"ussia":31269,"Jackson":31270,"LI":31271,"Native":31272,"Rs":31273,"Ohio":31274,"ĠKathleen":31275,"Fortunately":31276,"Ġattendant":31277,"ĠPreferred":31278,"ĠDidn":31279,"ĠVs":31280,"Mis":31281,"Ġrespondent":31282,"Ġboun":31283,"stable":31284,"Ġpaved":31285,"Ġunexpl":31286,"ĠCheney":31287,"LM":31288,"ĠCull":31289,"blown":31290,"Ġconfronting":31291,"ocese":31292,"serving":31293,"Wi":31294,"ĠLithuania":31295,"anni":31296,"Ġstalk":31297,"hd":31298,"Ġvener":31299,"APH":31300,"ynchronous":31301,"URR":31302,"umably":31303,"historic":31304,"Half":31305,"Hay":31306,"Ġresilience":31307,"spection":31308,"Ġabandoning":31309,"Obs":31310,"ĠDebbie":31311,"Ġgradient":31312,"ĠPlaint":31313,"ĠCanal":31314,"ARCH":31315,"Ġexpansive":31316,"Ġfung":31317,"Ġbounced":31318,"Und":31319,"Ġprecautions":31320,"Ġclarification":31321,"Ġdagger":31322,"Ġgrips":31323,"Ġµ":31324,"ĠRivera":31325,"ĠUndead":31326,"isites":31327,"ĠFIRST":31328,"ño":31329,"audi":31330,"Ġhostages":31331,"Ġcompliant":31332,"Ġalumni":31333,"Seven":31334,"Ġcybersecurity":31335,"either":31336,"Collect":31337,"Ġinvariably":31338,"ĠSoci":31339,"Ġlawmaker":31340,"Ġale":31341,"ĠPersonally":31342,"Nazi":31343,"Ġcustomization":31344,"ĠProc":31345,"ĠSaskatchewan":31346,"eaturing":31347,"Ġspared":31348,"Ġdiscontinued":31349,"Ġcomputational":31350,"ĠMotorola":31351,"Ġsupremacist":31352,"governmental":31353,"Ġparadise":31354,"ĠDowning":31355,"ĠNikon":31356,"Ġcatalyst":31357,"berra":31358,"Toronto":31359,"875":31360,"beta":31361,"ĠMacron":31362,"Ġunrealistic":31363,"vector":31364,"ĠVehicles":31365,"itiveness":31366,"ĠRV":31367,"ĠColbert":31368,"sin":31369,"oji":31370,"entin":31371,"ĠKrish":31372,"hello":31373,"ffield":31374,"oky":31375,"ĠTate":31376,"Ġmaple":31377,"Ġaids":31378,"chemical":31379,"334":31380,"nuts":31381,"ĠWarp":31382,"Ġxx":31383,"ĠRobb":31384,"umerous":31385,"_-_":31386,"ftime":31387,"ĠVW":31388,"Ġwinger":31389,"ĠDome":31390,"tools":31391,"ĠPV":31392,"ĠGeorgetown":31393,"Ġgeared":31394,"Ġjihadists":31395,"Ġcp":31396,"Ġsteroids":31397,"Mother":31398,"cleros
is":31399,"ĠDRM":31400,"nesia":31401,"Ġlinger":31402,"Ġimmersive":31403,"ĠCOUN":31404,"Ġoutweigh":31405,"ensual":31406,"Band":31407,"Ġtransforms":31408,"matched":31409,"psons":31410,"ĠJudicial":31411,"factor":31412,"Ġreferral":31413,"Ġoddly":31414,"ĠWenger":31415,"Bring":31416,"ĠBows":31417,"602":31418,"ICLE":31419,"Ġlions":31420,"ĠAcademic":31421,"ĠThorn":31422,"ĠRaider":31423,"kefeller":31424,"Storage":31425,"Lower":31426,"ĠOrt":31427,"ĠEquality":31428,"ALT":31429,"ĠSOC":31430,"Types":31431,"Ġlyn":31432,"ĠAsset":31433,"coat":31434,"TPP":31435,"CVE":31436,"ĠPioneer":31437,"application":31438,"Modern":31439,"ĠHK":31440,"Environment":31441,"Alright":31442,"Rain":31443,"IPP":31444,"ĠShiite":31445,"Ġmound":31446,"ĠAbilities":31447,"condition":31448,"Staff":31449,"Ġcompetence":31450,"ĠMoor":31451,"ĠDiablo":31452,"Ġwithheld":31453,"Ġostensibly":31454,"ĠBrom":31455,"Ġmsg":31456,"Ġdenomin":31457,"ĠReferences":31458,"ĠFP":31459,"Ġplunged":31460,"Ġpamph":31461,"moving":31462,"central":31463,"Ġdownright":31464,"Ġfading":31465,"Tal":31466,"Typ":31467,"ĠThy":31468,"ukes":31469,"ithe":31470,"Ġove":31471,"Ġbattled":31472,"Ġseafood":31473,"Ġfigur":31474,"ĠRD":31475,"crop":31476,"Ġsquads":31477,"{\\":31478,"à¹":31479,"ĠEh":31480,"Ġinterviewing":31481,"ĠQin":31482,"Ġaspiring":31483,"PLIC":31484,"Ġclauses":31485,"ĠGast":31486,"ĠNir":31487,"Ġluggage":31488,"Ġhose":31489,"Ġsystemd":31490,"Ġdescending":31491,"ĠRevised":31492,"ĠRails":31493,"align":31494,"709":31495,"337":31496,"Ġfug":31497,"charging":31498,"tags":31499,"Ġuter":31500,"kish":31501,"WARNING":31502,"490":31503,"profits":31504,"Ġvoyage":31505,"Ġace":31506,"ĠVanguard":31507,"ĠTanks":31508,"ĠMuk":31509,"Ġ226":31510,"Safe":31511,"Armor":31512,"Ġvolcanic":31513,"Ġwomb":31514,"ĠMIL":31515,"Ġbeginner":31516,"ĠRecogn":31517,"ĠAAP":31518,"PLAY":31519,")!":31520,"Ġdetecting":31521,"cn":31522,"Ġbreaches":31523,"Basically":31524,"ĠPag":31525,"ĠMunicipal":31526,"ĠIndie":31527,"ĠLaf":31528,"ĠDisable":31529,"ĠOlson":31530,"Ġrestrained":31531,"Ġrulings":31532,"Ġhumane":31533,"events":31534,"ĠCinema":31535,"displayText":31536,"ĠHatch":31537,"actionDate":31538,"onnaissance":31539,"Ġassaulting":31540,"ĠLug":31541,"CHAT":31542,"Ġvigorous":31543,"ĠPerse":31544,"Ġintolerance":31545,"ĠSnapchat":31546,"ĠSharks":31547,"Ġdummy":31548,"ĠDiagn":31549,"ĠGuitar":31550,"imeters":31551,"403":31552,"REG":31553,"Ax":31554,"Ġseparates":31555,"ĠMahm":31556,"Ġtv":31557,"jah":31558,"OOL":31559,"Circ":31560,"ĠWindsor":31561,"ussian":31562,"Ġintuition":31563,"Ġdisdain":31564,"ĠDonovan":31565,"Ġ221":31566,"Emb":31567,"Ġcondemning":31568,"Ġgenerosity":31569,"zzy":31570,"Ġpanties":31571,"ĠPrevent":31572,"ActionCode":31573,"ANA":31574,"342":31575,"externalActionCode":31576,"Ġspecifying":31577,"Ġcrystall":31578,"Jere":31579,"Ġrupt":31580,"ĠApprentice":31581,"Ġprofiling":31582,"к":31583,"Strike":31584,"Ġsideline":31585,"Ġobligated":31586,"Ġoccult":31587,"Ġbureaucratic":31588,"antically":31589,"rupted":31590,"negative":31591,"ĠEthiopia":31592,"ĠCivic":31593,"Ġinsiders":31594,"eligible":31595,"ĠTVs":31596,"ĠBAR":31597,"ĠTI":31598,"iologist":31599,"ĠAIR":31600,"Ġsubstituted":31601,"Arab":31602,"ĠSaul":31603,"ĠYog":31604,"prem":31605,"Ġbuilders":31606,"Ġstationary":31607,"Ġdoubtful":31608,"Ġvigorously":31609,"Ġthrilling":31610,"Physical":31611,"ĠCarey":31612,"ĠHydra":31613,"geoning":31614,"ĠSly":31615,"yton":31616,"Ġborrowers":31617,"ĠParkinson":31618,"Ġë":31619,"ĠJamaica":31620,"Ġsatir":31621,"Ġinsurgents":31622,"ĠFirm":31623,"Ġisot":31624,"ĠKarn":31625,"ourning":31626,"akens":31627,"docs":31628,
"little":31629,"ĠMonaco":31630,"CLASS":31631,"Turkey":31632,"Ly":31633,"ĠConan":31634,"assic":31635,"Ġstarred":31636,"ĠPacers":31637,"eties":31638,"Ġtipping":31639,"Moon":31640,"ĠRw":31641,"same":31642,"Ġcavity":31643,"Ġgoof":31644,"ĠZo":31645,"Shock":31646,"ummer":31647,"Ġemphasizes":31648,"Ġregrett":31649,"Ġnovelty":31650,"Ġenvy":31651,"ĠPassive":31652,"rw":31653,"505":31654,"Ġindifferent":31655,"ĠRica":31656,"ĠHimself":31657,"ĠFreddie":31658,"Ġadip":31659,"ä¸Ģ":31660,"Ġbreakout":31661,"Ġhurried":31662,"ĠHuang":31663,"ĠDisk":31664,"Ġroaming":31665,"?????-?????-":31666,"UV":31667,"ĠRicky":31668,"ĠSigma":31669,"Ġmarginalized":31670,"Ġedits":31671,"Ġ304":31672,"memory":31673,"Ġspecimen":31674,"293":31675,"ãģ¯":31676,"Ġvertically":31677,"Ġaudition":31678,"ĠHeck":31679,"Ġcaster":31680,"ĠHoldings":31681,"adal":31682,"ĠCron":31683,"ĠLiam":31684,"Ġdeflect":31685,"Pick":31686,"ĠDebug":31687,"REF":31688,"Ġversatility":31689,"othes":31690,"classified":31691,"ĠMahar":31692,"ĠHort":31693,"Counter":31694,"stasy":31695,"noticed":31696,"331":31697,"ĠShim":31698,"fuck":31699,"ĠBie":31700,"Ġairing":31701,"ĠProtein":31702,"ĠHolding":31703,"Ġspectators":31704,"iliated":31705,"ĠThatcher":31706,"nosis":31707,"ãĥ¼ãĥ³":31708,"Tele":31709,"Boston":31710,"ĠTempl":31711,"stay":31712,"Ġdeclarations":31713,"479":31714,"Volume":31715,"ĠDesigner":31716,"ĠOverwatch":31717,"idae":31718,"Ġonwards":31719,"Ġnets":31720,"ĠManila":31721,"particularly":31722,"Ġpolitic":31723,"oother":31724,"Ġportraits":31725,"Ġpavement":31726,"cffff":31727,"Ġsaints":31728,"Ġbeginners":31729,"ESPN":31730,"Ġshortcomings":31731,"âķIJâķIJ":31732,"Ġcomet":31733,"ĠOrganic":31734,"quel":31735,"Ġhospitalized":31736,"Break":31737,"Ġpeel":31738,"dylib":31739,"aspx":31740,"urances":31741,"ĠTIM":31742,"Pg":31743,"Ġreadable":31744,"ĠMalik":31745,"Ġmuzzle":31746,"Ġbenchmarks":31747,"dal":31748,"ĠVacc":31749,"ĠHicks":31750,"609":31751,"ĠBiblical":31752,"heng":31753,"Ġoverload":31754,"ĠCivilization":31755,"Ġimmoral":31756,"Ġfries":31757,"ãĤĴ":31758,"Ġreproduced":31759,"Ġformulation":31760,"jug":31761,"irez":31762,"gear":31763,"Ġcoached":31764,"MpServer":31765,"ĠSJ":31766,"ĠKw":31767,"Init":31768,"deal":31769,"ĠOro":31770,"ĠLoki":31771,"ĠSongs":31772,"Ġ232":31773,"ĠLouise":31774,"asionally":31775,"Ġuncond":31776,"ollywood":31777,"Ġprogressives":31778,"ĠEnough":31779,"ĠDoe":31780,"Ġwreckage":31781,"Ġbrushed":31782,"ĠBaseType":31783,"Ġzoning":31784,"ishable":31785,"hetically":31786,"ĠCaucus":31787,"ĠHue":31788,"Ġkarma":31789,"ĠSporting":31790,"Ġtrader":31791,"Ġseeming":31792,"ĠCapture":31793,"430":31794,"bish":31795,"Ġtunes":31796,"Ġindoors":31797,"ĠSphere":31798,"ĠDancing":31799,"TERN":31800,"Ġnob":31801,"ĠGST":31802,"maps":31803,"Ġpeppers":31804,"Fit":31805,"Ġoversees":31806,"ĠRabbi":31807,"ĠRuler":31808,"vertising":31809,"office":31810,"xxx":31811,"Ġraft":31812,"Changed":31813,"Ġtextbooks":31814,"Links":31815,"ĠOmn":31816,"ãĢij":31817,"Ġinconvenience":31818,"ĠDonetsk":31819,"=~":31820,"Ġimplicitly":31821,"Ġboosts":31822,"ĠBones":31823,"ĠBoom":31824,"Courtesy":31825,"Ġsensational":31826,"ANY":31827,"Ġgreedy":31828,"eden":31829,"Ġinexper":31830,"ĠLer":31831,"ĠVale":31832,"Ġtighten":31833,"ĠEAR":31834,"ĠNum":31835,"Ġancestor":31836,"Sent":31837,"ĠHorde":31838,"urgical":31839,"allah":31840,"Ġsap":31841,"amba":31842,"ĠSpread":31843,"twitch":31844,"Ġgrandson":31845,"Ġfracture":31846,"Ġmoderator":31847,"ĠSeventh":31848,"ĠReverse":31849,"Ġestimation":31850,"Choose":31851,"Ġparach":31852,"Ġbarric":31853,"ãĢIJ":31854,"Ġcompass":31855,"Ġallergic":31856,"âĢķ":31857,"OTHE
R":31858,"errilla":31859,"Ġwagon":31860,"Ġzinc":31861,"Ġrubbed":31862,"ĠFuller":31863,"ĠLuxembourg":31864,"ĠHoover":31865,"Ġliar":31866,"ĠEvening":31867,"ĠCobb":31868,"esteem":31869,"Ġselector":31870,"ĠBrawl":31871,"isance":31872,"ĠEk":31873,"Ġtroop":31874,"Ġguts":31875,"ĠAppeal":31876,"ĠTibetan":31877,"Ġroutines":31878,"ĠMent":31879,"Ġsummarized":31880,"steamapps":31881,"Ġtranqu":31882,"Ġ1929":31883,"oran":31884,"ĠAuthent":31885,"Ġgmaxwell":31886,"Ġapprehens":31887,"Ġpoems":31888,"Ġsausage":31889,"ĠWebster":31890,"urus":31891,"Ġthemed":31892,"Ġlounge":31893,"Ġcharger":31894,"Spoiler":31895,"Ġspilled":31896,"hog":31897,"ĠSunder":31898,"ĠAin":31899,"ĠAngry":31900,"Ġdisqual":31901,"ĠFrequency":31902,"ĠEthernet":31903,"Ġhelper":31904,"Percent":31905,"Ġhorrifying":31906,"Ġail":31907,"ĠAllan":31908,"EEE":31909,"ĠCrossing":31910,"449":31911,"Ġholog":31912,"ĠPuzzles":31913,"ĠGoes":31914,"erenn":31915,"604":31916,"ãģı":31917,"ĠRafael":31918,"Ġatten":31919,"ĠEmanuel":31920,"Ġupro":31921,"ĠSusp":31922,"Psych":31923,"ĠTrainer":31924,"ĠNES":31925,"ĠHunts":31926,"becue":31927,"Ġcounselor":31928,"Rule":31929,"Ġtoxins":31930,"Ġbanners":31931,"rifice":31932,"Ġgreeting":31933,"Ġfrenzy":31934,"Ġallocate":31935,"Ġ*)":31936,"expr":31937,"503":31938,"ĠChick":31939,"ĠTorn":31940,"Ġconsolidation":31941,"ĠFletcher":31942,"switch":31943,"frac":31944,"clips":31945,"ĠMcKin":31946,"ĠLunar":31947,"Month":31948,"ITCH":31949,"Ġscholarly":31950,"raped":31951,"398":31952,"Ġ1910":31953,"Ġegreg":31954,"Ġinsecure":31955,"Ġvictorious":31956,"cffffcc":31957,"Ġsingled":31958,"Ġelves":31959,"ĠWond":31960,"burst":31961,"Ġcamoufl":31962,"ĠBLACK":31963,"Ġconditioned":31964,"çī":31965,"answered":31966,"Ġcompulsory":31967,"ascist":31968,"Ġpodcasts":31969,"ĠFrankfurt":31970,"bnb":31971,"Ġneoliberal":31972,"ĠKeyboard":31973,"ĠBelle":31974,"warm":31975,"Ġtrusts":31976,"Ġinsured":31977,"ĠBucc":31978,"usable":31979,"607":31980,"ĠPlains":31981,"Ġ1890":31982,"Ġsabotage":31983,"Ġlodged":31984,"felt":31985,"Ġga":31986,"ĠNarc":31987,"ĠSalem":31988,"Ġseventy":31989,"ĠBlank":31990,"pocket":31991,"Ġwhisper":31992,"Ġmating":31993,"omics":31994,"ĠSalman":31995,"ĠKad":31996,"Ġangered":31997,"Ġcollisions":31998,"Ġextraordinarily":31999,"Ġcoercion":32000,"Ghost":32001,"birds":32002,"èĢ":32003,"kok":32004,"Ġpermissible":32005,"avorable":32006,"Ġpointers":32007,"Ġdissip":32008,"aci":32009,"Ġtheatrical":32010,"ĠCosmic":32011,"Ġforgetting":32012,"Ġfinalized":32013,"大":32014,"yout":32015,"library":32016,"Ġbooming":32017,"ĠBelieve":32018,"ĠTeacher":32019,"ĠLiv":32020,"ĠGOODMAN":32021,"ĠDominican":32022,"ORED":32023,"ĠParties":32024,"Ġprecipitation":32025,"ĠSlot":32026,"Roy":32027,"ĠCombined":32028,"Ġintegrating":32029,"Ġchrome":32030,"Ġintestinal":32031,"ĠRebell":32032,"Ġmatchups":32033,"Ġblockbuster":32034,"ĠLoren":32035,"ĠLevy":32036,"Ġpreaching":32037,"ĠSending":32038,"ĠPurpose":32039,"rax":32040,"fif":32041,"Ġauthoritative":32042,"ĠPET":32043,"astical":32044,"Ġdishon":32045,"Ġchatting":32046,"Ġ\"$:/":32047,"Connection":32048,"Ġrecreate":32049,"Ġdelinqu":32050,"Ġbroth":32051,"ĠDirty":32052,"ĠAdmin":32053,"zman":32054,"Ġscholarships":32055,"Ġ253":32056,"contact":32057,"alsa":32058,"767":32059,"creen":32060,"abbage":32061,"Ġ1915":32062,"Ġblended":32063,"Ġalarmed":32064,"Language":32065,"356":32066,"Ġblends":32067,"ĠChanged":32068,"Wolf":32069,"Ġhepat":32070,"Creating":32071,"Ġpersecut":32072,"Ġsweetness":32073,"arte":32074,"Ġforfeiture":32075,"ĠRoberto":32076,"impro":32077,"NFL":32078,"ĠMagnet":32079,"Detailed":32080,"Ġinsignificant":32081,"ĠPOLIT":32082,"Ġ
BBQ":32083,"ĠCPS":32084,"Ġseaw":32085,"aminer":32086,"mL":32087,"endif":32088,"finals":32089,"Ġ265":32090,"uish":32091,"Ġ})":32092,"ĠProblems":32093,"Ġemblem":32094,"Ġseriousness":32095,"Ġparsing":32096,"Ġsubstitution":32097,"Ġpressured":32098,"Ġrecycled":32099,"aleb":32100,"Ruby":32101,"Ġproficiency":32102,"Driver":32103,"ĠWester":32104,":'":32105,"AFTA":32106,"Ġmantle":32107,"ĠClayton":32108,"flag":32109,"Ġpractitioner":32110,"covered":32111,"ĠStruct":32112,"addafi":32113,"425":32114,"ĠTownship":32115,"ĠHydro":32116,"Louis":32117,"343":32118,"Ġcondo":32119,"ĠTao":32120,"Ġutilization":32121,"Ġnausea":32122,"ĠDems":32123,"ridges":32124,"pause":32125,"Ġformulas":32126,"Ġchallenger":32127,"376":32128,"Ġdefective":32129,"ĠRailway":32130,"ĠPubMed":32131,"Ġyogurt":32132,"lbs":32133,"ĠNorfolk":32134,"OPE":32135,"ĠMoody":32136,"Ġdistributor":32137,"Ġscrolls":32138,"Ġextracts":32139,"Stan":32140,"Ġviability":32141,"Ġexposes":32142,"Ġstarvation":32143,"ĠSteps":32144,"ĠDodd":32145,"few":32146,"STD":32147,"332":32148,"Ġclosures":32149,"Ġcomplementary":32150,"ĠSasha":32151,"umpy":32152,"Ġmonet":32153,"Ġarticulate":32154,"ĠDoct":32155,"killer":32156,"Ġscrim":32157,"Ġ264":32158,"Ġprostitutes":32159,"Ġsevered":32160,"Ġattachments":32161,"Ġcooled":32162,"Lev":32163,"ĠFalk":32164,"fail":32165,"Ġpoliceman":32166,"ĠDag":32167,"Ġprayed":32168,"ĠKernel":32169,"Ġclut":32170,"Ġcath":32171,"Ġanomaly":32172,"Storm":32173,"emaker":32174,"ĠBreakfast":32175,"uli":32176,"oire":32177,"JJ":32178,"hz":32179,"Operation":32180,"ĠSick":32181,"354":32182,"ĠGuatemala":32183,"Rate":32184,"Ġexposures":32185,"faces":32186,"ĠArchae":32187,"raf":32188,"ĠMia":32189,"Ġ2025":32190,"Ġopaque":32191,"Ġdisguised":32192,"ĠHeadquarters":32193,"Sah":32194,"Ġpots":32195,"978":32196,"ĠMalf":32197,"Ġfrowned":32198,"Ġpoisonous":32199,"ĠConvers":32200,"eeks":32201,"Ġcrab":32202,".\"\"":32203,"Ġtreason":32204,"Ġranc":32205,"Ġescalating":32206,"Ġwarr":32207,"Ġmobs":32208,"Ġlamps":32209,"ĠSunshine":32210,"ĠBrunswick":32211,"Phones":32212,"Ġspelled":32213,"ĠSkip":32214,"Ġ2050":32215,"Ġ1911":32216,"ĠPluto":32217,"ĠAmend":32218,"Ġmeats":32219,"387":32220,"Ġstomp":32221,"ĠZhou":32222,"ĠLeviathan":32223,"ĠHazard":32224,"adv":32225,"ĠOrwell":32226,"Ġaloud":32227,"Ġbumper":32228,"ĠAnarch":32229,"ubuntu":32230,"ĠSerious":32231,"fitting":32232,"ĠOptional":32233,"ĠCecil":32234,"REAM":32235,"Ġserotonin":32236,"Ġcultivate":32237,"agogue":32238,"}\\":32239,"Ġmosques":32240,"ĠSunny":32241,"Ġreactive":32242,"revolution":32243,"ĠLup":32244,"ĠFedora":32245,"Ġdefenseman":32246,"ĠVID":32247,"istine":32248,"Ġdrowning":32249,"ĠBroadcasting":32250,"Ġthriller":32251,"ĠScy":32252,"Ġaccelerating":32253,"Ġdirects":32254,"odied":32255,"bike":32256,"duration":32257,"Ġpainfully":32258,"Redd":32259,"Ġproductions":32260,"Ġgag":32261,"Ġwhist":32262,"Ġsock":32263,"Ġinfinitely":32264,"ĠConcern":32265,"ĠCitadel":32266,"Ġlieu":32267,"Ġcandles":32268,"ogeneous":32269,"arger":32270,"Ġheavenly":32271,"inflammatory":32272,"Performance":32273,"Cs":32274,"ructose":32275,"azaki":32276,"Ġpessim":32277,"Ġinference":32278,"Ġpowd":32279,"ĠZoe":32280,"Ġpaints":32281,"Ġdazz":32282,"pta":32283,"-----------":32284,"Ġinspir":32285,"ĠExperimental":32286,"ĠKnife":32287,"regor":32288,"bors":32289,"Ġshowers":32290,"romeda":32291,"Ġsaint":32292,"Ġbenign":32293,"ĠJiang":32294,"Ġenvisioned":32295,"Ġshroud":32296,"IFT":32297,"HO":32298,"Ġshuff":32299,"ĠICC":32300,"Ġsegreg":32301,"Ġrevisit":32302,"ighthouse":32303,"Li":32304,"Ġsubstrate":32305,"ĠSeas":32306,"ĠReward":32307,"ĠHep":32308,"ĠBrass":32309,"sbm":32
310,"Ġeliminates":32311,"Ġstamina":32312,"ĠVAT":32313,"ĠLoan":32314,"Ġconstraint":32315,"Ġappropriated":32316,"Ġpes":32317,"ĠALE":32318,"ranging":32319,"Ġ404":32320,"392":32321,"Ġintellectuals":32322,"achu":32323,"Ġrestructuring":32324,"ĠLevin":32325,"Ġrunes":32326,"Ġdelightful":32327,"Ġcarbohydrates":32328,"ĠModels":32329,"ĠExpo":32330,"Ġtransporting":32331,"alloc":32332,"Ġringing":32333,"Samsung":32334,"Ġscarcely":32335,"ĠURLs":32336,"ĠMAS":32337,"Ġprototypes":32338,"Ġnarrator":32339,"ĠCPUs":32340,"cdn":32341,"ĠBarton":32342,"Ġdecidedly":32343,"ĠShu":32344,"ixir":32345,"ocious":32346,"ĠMyst":32347,"Nintendo":32348,"Ġreuse":32349,"Ġforgiven":32350,"Few":32351,"inical":32352,"nat":32353,"Ġseamless":32354,"ĠEva":32355,"ĠEVE":32356,"ĠJO":32357,"landers":32358,"Ġsofter":32359,"negie":32360,"Ġtransient":32361,"Ġorbital":32362,"Ġfulfil":32363,"ĠKom":32364,"Hopefully":32365,"Ġdynamically":32366,"ĠHunger":32367,"åĽ":32368,"ĠArmenia":32369,"elman":32370,"berto":32371,"Ġpige":32372,"ĠIDs":32373,"limit":32374,"Ġveins":32375,"Ġsoaring":32376,"packs":32377,"Golden":32378,"ĠCrab":32379,"istor":32380,"ĠRPM":32381,"Ġ$$":32382,"gression":32383,"Ġjihadist":32384,"Ġgamble":32385,"Ġcareg":32386,"Ġinflated":32387,"Face":32388,"ĠFirearms":32389,"ĠEmmanuel":32390,"âĿ":32391,"Ġshocks":32392,"grab":32393,"Ġsplend":32394,"ĠHPV":32395,"abortion":32396,"Above":32397,"Entity":32398,"players":32399,"Ġcommenced":32400,"ulence":32401,"Ġfulfillment":32402,"Ġembodiments":32403,"ĠWelfare":32404,"Ġhail":32405,"Ġ<@":32406,"tten":32407,"Ġcatcher":32408,"ĠJazeera":32409,"Ġvolcano":32410,"Ġstabilize":32411,"ĠHandler":32412,"Ġintensified":32413,"ĠAbrams":32414,"Ġhumiliation":32415,"paced":32416,"605":32417,"ĠCentOS":32418,"Specific":32419,"Ġheed":32420,"ĠCAM":32421,"ĠGalile":32422,"Die":32423,"Ġabolished":32424,"ĠThomson":32425,"ĠTeachers":32426,"ĠWass":32427,"jong":32428,"ĠISBN":32429,"ĠAllies":32430,"shake":32431,"å·":32432,"vict":32433,"Howard":32434,"Ġdeem":32435,"Ġexceedingly":32436,"ĠSmartstocks":32437,"ibe":32438,"Ġdoorway":32439,"Ġcompeted":32440,"igmat":32441,"Ġnationalists":32442,"Ġgroom":32443,"ĠKeen":32444,"Ġdisposable":32445,"decl":32446,"ĠTolkien":32447,"ĠScheme":32448,"Ġbiod":32449,"Ġavid":32450,"ĠElon":32451,"agar":32452,"ĠTSA":32453,"Roman":32454,"Ġartificially":32455,"Ġadvisors":32456,"XL":32457,"ĠInferno":32458,"366":32459,"Ġtedious":32460,"ĠPhotography":32461,"ĠCarrie":32462,"Ġtrope":32463,"ĠSandra":32464,"Ġdecimal":32465,"Queen":32466,"ĠGundam":32467,"ĠOM":32468,"otech":32469,"NBA":32470,"Ġ1932":32471,"Ġentrenched":32472,"ĠMarion":32473,"Ġfraternity":32474,"Labour":32475,"Henry":32476,"Ġlatitude":32477,"Either":32478,"Ġenhances":32479,"ĠPotential":32480,"Ġshines":32481,"idad":32482,"Ġbreadth":32483,"Ġcapacities":32484,"ĠðŁĻĤ":32485,"ĠBronx":32486,"Ġsexes":32487,"Ġdifferentiation":32488,"Ġheavyweight":32489,"ĠTaj":32490,"dra":32491,"Ġmigrate":32492,"Ġexhaustion":32493,"ĠRUN":32494,"elsius":32495,"ĠCuomo":32496,"Ġguitars":32497,"Ġclones":32498,"ĠSomew":32499,"ĠPry":32500,"-------------":32501,"Ġwarranted":32502,"cycles":32503,"Ġsalvage":32504,"Ġdisks":32505,"RANT":32506,"ĠNGOs":32507,"ĠMartian":32508,"\":[{\"":32509,"Ġaddicts":32510,"ojure":32511,"illet":32512,"Ġamazingly":32513,"artments":32514,"pixel":32515,"ĠGPUs":32516,"Layout":32517,"è£":32518,"ĠTamil":32519,"ĠBasil":32520,"Ġimpartial":32521,"ĠStructure":32522,"fork":32523,"bryce":32524,"Ġridge":32525,"ĠHamburg":32526,"rious":32527,"Ġblitz":32528,"cigarettes":32529,"Ġcanned":32530,"402":32531,"Ġironically":32532,"Ġcompassionate":32533,"ĠHawkins":32534,".#"
:32535,"ĠCathedral":32536,"Ġrallied":32537,"internal":32538,"Ġquota":32539,"stakes":32540,"TEXT":32541,"mom":32542,"Ġcompletes":32543,"Ġ238":32544,"Ġshrug":32545,"ãĥij":32546,"ĠNinth":32547,"Ġrevise":32548,"ĠProvider":32549,"Ġtreacher":32550,"Ġquasi":32551,"ĠPRES":32552,"Ġdeposition":32553,"Ġconfidentiality":32554,"issors":32555,"Ġimbalance":32556,"Ġspanning":32557,"Ġangular":32558,"ĠCul":32559,"communication":32560,"ĠNora":32561,"ĠGenius":32562,"opter":32563,"Ġsacked":32564,"Spot":32565,"Ġfinely":32566,"ĠCHR":32567,"282":32568,"waves":32569,"Palest":32570,"ĠRohing":32571,"NL":32572,"è¿":32573,"Ġshitty":32574,"ĠScalia":32575,"475":32576,"Progress":32577,"Ġreferencing":32578,"Ġclassrooms":32579,"abee":32580,"Ġsod":32581,"hesion":32582,"708":32583,"ĠZuckerberg":32584,"ĠFinish":32585,"ĠScotia":32586,"ĠSavior":32587,"ĠInstallation":32588,"antha":32589,"(-":32590,"Ġ302":32591,"ĠPunk":32592,"Ġcrater":32593,"youtu":32594,"Ġroast":32595,"Ġinfluencing":32596,"Ġdup":32597,"ĠJR":32598,"ĠGrav":32599,"Ġstature":32600,"Ġbathrooms":32601,"Aside":32602,"Wiki":32603,"mean":32604,"ĠZak":32605,"ĠOnes":32606,"ĠNath":32607,"Ġhypert":32608,"Ġcommencement":32609,"Civil":32610,"Ġmoderately":32611,"Ġdistributors":32612,"Ġbreastfeeding":32613,"Ġ980":32614,"ĠSik":32615,"ĠCig":32616,"ĠAMER":32617,"RIP":32618,"ĠCareer":32619,"usting":32620,"Ġmessed":32621,"Ġeh":32622,"ĠJensen":32623,"/$":32624,"Ġblackmail":32625,"Ġconversions":32626,"Ġscientifically":32627,"Ġmantra":32628,"paying":32629,"Ġivory":32630,"ĠCourts":32631,"OUGH":32632,"auntlet":32633,"Serial":32634,"Brow":32635,"ĠHundreds":32636,"323":32637,"Ġpee":32638,"Ġlinux":32639,"Ġsubmer":32640,"ĠPrincipal":32641,"485":32642,"ĠDSL":32643,"ĠCousins":32644,"Ġdoctrines":32645,"ĠAthletics":32646,"Ġ315":32647,"ĠKarma":32648,"Ġattent":32649,"urger":32650,"Ġprescribe":32651,"Ġencaps":32652,"ĠCame":32653,"Ġsecretive":32654,"ĠCrimes":32655,"dn":32656,"Clean":32657,"ĠEgyptians":32658,"ĠCarpenter":32659,"Ġll":32660,"Hum":32661,"ĠMilo":32662,"Ġcapitalists":32663,"Ġbriefed":32664,"Twe":32665,"ĠBasin":32666,"elvet":32667,"Mos":32668,"Ġplunge":32669,"ĠKaiser":32670,"ĠFuj":32671,"illin":32672,"Ġsafeguards":32673,"Ġoste":32674,"ĠOpportunity":32675,"ĠMafia":32676,"ĠCalling":32677,"apa":32678,"urban":32679,"brush":32680,"illard":32681,"cé":32682,"intelligence":32683,"ĠLob":32684,"ĠDruid":32685,"Ġsmoother":32686,"Ġfooting":32687,"Ġmotorists":32688,"arcity":32689,"Ġmasculinity":32690,"Ġmism":32691,"Ġabdominal":32692,"ĠTavern":32693,"ĠRoh":32694,"Ġescapes":32695,"signed":32696,"Anthony":32697,"Ġsacrificing":32698,"Ġintimacy":32699,"Ġanterior":32700,"ĠKod":32701,"Ġmotif":32702,"Ġgraz":32703,"Ġvisualization":32704,"Ġguitarist":32705,"ĠTrotsky":32706,"magic":32707,"Dar":32708,"ĠMori":32709,"Ġwards":32710,"Ġtoilets":32711,"lest":32712,"Ġteleport":32713,"ĠSundays":32714,"ĠPlat":32715,"ETS":32716,"ĠeSports":32717,"Patrick":32718,"ĠKatherine":32719,"enko":32720,"Ġhassle":32721,"ĠMick":32722,"ggles":32723,"Ġhob":32724,"aintain":32725,"Ġairborne":32726,"Ġspans":32727,"Ġchili":32728,"Ġaperture":32729,"Ġvolunteered":32730,"ĠIncident":32731,"ĠFres":32732,"ĠVeteran":32733,"aughtered":32734,"ingo":32735,"Ġuninsured":32736,"CLOSE":32737,"Ġfuse":32738,"Ġerotic":32739,"Ġadvertise":32740,"raising":32741,"Texture":32742,"Ġattends":32743,"ĠREAL":32744,"uddled":32745,"Ġsmoot":32746,"Ġ305":32747,"ĠWillis":32748,"Ġblond":32749,"Analysis":32750,"ĠVT":32751,"onica":32752,"Ġstronghold":32753,"RF":32754,"NM":32755,".>>":32756,"Ġprosperous":32757,"Ġboasted":32758,"292":32759,"ĠManufacturing":32760,"PRESS":32761,"gren
":32762,"Ġpharmacy":32763,"ĠRockefeller":32764,"kai":32765,"Ġthumbs":32766,"ĠHut":32767,"Ġmotherboard":32768,"Ġguardians":32769,"ĠAlter":32770,"llular":32771,"Ġshack":32772,"Ġwisely":32773,"Ġbackbone":32774,"erva":32775,"Ġsuicides":32776,"ĠMcGregor":32777,"ijah":32778,"Emer":32779,"ĠBrav":32780,"Ġdesignate":32781,"POST":32782,"produced":32783,"Ġcleansing":32784,"irlwind":32785,"existent":32786,"ĠHumph":32787,"ĠPayne":32788,"Ġvested":32789,"Å¡":32790,"Ġstringent":32791,"iona":32792,"Ġunsub":32793,"Ġsummed":32794,"ĠHercules":32795,"subject":32796,"ĠRagnar":32797,"ĠNos":32798,"Ġcharacterization":32799,"Ġsavvy":32800,"ĠDawson":32801,"ĠCasino":32802,"Ġfri":32803,"ĠBarrier":32804,"Ġmisinformation":32805,"Ġinsulation":32806,"Ġcorridors":32807,"Ġairplanes":32808,"ĠNoct":32809,"ahi":32810,"Ġ1916":32811,"kb":32812,"armac":32813,"Ġshun":32814,"Ġschema":32815,"Ġhorrified":32816,"Ġ239":32817,"aunders":32818,"NB":32819,"iates":32820,"erity":32821,"ĠShard":32822,"Ġrarity":32823,"Ġgrouped":32824,"ĠGhana":32825,"against":32826,"ĠBiological":32827,"ĠAware":32828,"owell":32829,"ÏĦ":32830,"ĠBeau":32831,"shaw":32832,"Hack":32833,"ĠJulius":32834,"USS":32835,"olson":32836,"auna":32837,"cru":32838,"ĠMaurice":32839,"ĠIk":32840,"Ġsequencing":32841,"Ġradicals":32842,"Ġ(?,":32843,"virtual":32844,"Ġanyways":32845,"Ġreperc":32846,"Ġhandlers":32847,"Ġhesitant":32848,"éĥ":32849,"ĠMF":32850,"plementation":32851,"associated":32852,"Ġcampaigned":32853,"ĠYue":32854,"utations":32855,"ĠYoga":32856,"Ġsimmer":32857,"Ġrods":32858,"Ġmelody":32859,"Ġconvoy":32860,"videos":32861,"Ġscreened":32862,"Neg":32863,"ochemical":32864,"Ġ())":32865,"Ġultras":32866,"Ġantip":32867,"ĠIslanders":32868,"704":32869,"Ġfetish":32870,"Ġridiculously":32871,"ĠKart":32872,"Ġmitochondrial":32873,"Ġinterfering":32874,"Builder":32875,"Ġoverfl":32876,"Ġacne":32877,"ĠMud":32878,"ĠKerr":32879,"flex":32880,"ĠPostal":32881,"ĠBaltic":32882,"477":32883,"ĠPersons":32884,"ourage":32885,"HB":32886,"ĠMuse":32887,"ĠImmortal":32888,"ĠDriving":32889,"Ġpetitions":32890,"Ġsubscript":32891,"Ġsorce":32892,"ĠProcessor":32893,"uton":32894,"Sony":32895,"Ġphon":32896,"Ġraced":32897,"ĠAnthrop":32898,"Ġdaytime":32899,"ĠExercise":32900,"Adding":32901,"Ġengages":32902,"ĠQualcomm":32903,"Ġmiracles":32904,"Ġmemes":32905,"ĠDrink":32906,"ĠOrioles":32907,"Ġhairs":32908,"ĠPolar":32909,"athom":32910,"Ġslippery":32911,"ĠRemy":32912,"Ġcaramel":32913,"ĠYEAR":32914,"Ġalk":32915,"Ign":32916,"aution":32917,"ĠMerlin":32918,"ĠCran":32919,"Ġapologies":32920,"Ġ410":32921,"Ġouting":32922,"ĠMemories":32923,"appointed":32924,"Ġcountered":32925,"uld":32926,"posing":32927,"Ġfirewall":32928,"ĠWast":32929,"ĠWet":32930,"worked":32931,"seller":32932,"Ġrepealed":32933,"ereo":32934,"assuming":32935,"BLIC":32936,"mite":32937,"ĠCEOs":32938,"ĠChapel":32939,"elligent":32940,"________________________":32941,"Dog":32942,"Ġwart":32943,"Ġsubscriber":32944,"sports":32945,"Ġbegged":32946,"ĠMV":32947,"Ġsemif":32948,"ethical":32949,"Ġpreach":32950,"Ġrevital":32951,"Ġpunitive":32952,"Ġshortcuts":32953,"Ġinstituted":32954,"ĠWarsaw":32955,"Ġabdomen":32956,"ĠKING":32957,"Ġsuperintendent":32958,"Ġfry":32959,"ĠGeo":32960,"TOR":32961,"Ġcontradictions":32962,"aptic":32963,"Ġlandscapes":32964,"bugs":32965,"Ġclust":32966,"Ġvolley":32967,"cribed":32968,"Ġtandem":32969,"Ġrobes":32970,"WHAT":32971,"Ġpromoter":32972,"Ġeloqu":32973,"reviewed":32974,"ĠDK":32975,"ĠPlato":32976,"Ġfps":32977,"Tank":32978,"ĠDerrick":32979,"Ġprioritize":32980,"asper":32981,"ĠHonduras":32982,"ĠCompleted":32983,"nec":32984,"Ġmog":32985,"nir":32986,"ĠMayo":32987,
"DEF":32988,"stall":32989,"inness":32990,"ĠVolkswagen":32991,"Ġprecaution":32992,"ĠMell":32993,"iak":32994,"istries":32995,"Ġ248":32996,"Ġoverlapping":32997,"Senate":32998,"ĠEnhance":32999,"resy":33000,"racial":33001,"ORTS":33002,"ĠMormons":33003,"Strong":33004,"ĠCoch":33005,"Mexico":33006,"ĠMaduro":33007,"Ġjars":33008,"Ġcane":33009,"Wik":33010,"olla":33011,"ifference":33012,"Ġphysicist":33013,"ĠMaggie":33014,"Ġ285":33015,"Ġdepiction":33016,"ĠMcLaren":33017,"Ju":33018,"Ġslows":33019,"Ġcommissioners":33020,"ĠWillow":33021,"ĠExplos":33022,"hovah":33023,"Ġtechnician":33024,"Ġhomicides":33025,"ĠFlav":33026,"ĠTruman":33027,"Ġ10000":33028,"uctor":33029,"Ġshader":33030,"Newsletter":33031,"457":33032,"Ġrever":33033,"Ġhardened":33034,"Ġwhereabouts":33035,"Ġredevelop":33036,"Ġcarbs":33037,"Ġtravers":33038,"Ġsquirrel":33039,"Ġfollower":33040,"Ġsings":33041,"508":33042,"Ġrabbits":33043,"emonium":33044,"Ġdocumenting":33045,"Ġmisunderstood":33046,")'":33047,"Rick":33048,"ggies":33049,"Ġpremie":33050,"Ġskating":33051,"Ġpassports":33052,"Ġfists":33053,"ageddon":33054,"Haw":33055,"ACP":33056,"080":33057,"ĠThoughts":33058,"ĠCarlson":33059,"Ġpriesthood":33060,"hua":33061,"Ġdungeons":33062,"ĠLoans":33063,"Ġantis":33064,"Ġfamiliarity":33065,"ĠSabb":33066,"opal":33067,"ĠInk":33068,"strike":33069,"Ġcram":33070,"Ġlegalized":33071,"Ġcuisine":33072,"Ġfibre":33073,"Travel":33074,"ĠMonument":33075,"ODY":33076,"ethy":33077,"Ġinterstate":33078,"ĠPUR":33079,"emporary":33080,"ĠArabian":33081,"developed":33082,"Ġsaddle":33083,"Ġgithub":33084,"ĠOffer":33085,"ĠISP":33086,"rolet":33087,"ĠSUPER":33088,"ĠDenis":33089,"Ġmultiplier":33090,"Ġstirred":33091,"Interestingly":33092,"Ġcustomary":33093,"Ġbilled":33094,"hex":33095,"Ġmultiplied":33096,"Ġflipping":33097,"ĠCrosby":33098,"Ġfundamentals":33099,"iae":33100,"ĠPlayed":33101,"ĠAtom":33102,"amazon":33103,"ĠFlam":33104,"eez":33105,"activated":33106,"Ġtablespoon":33107,"Ġliberalism":33108,"ĠPalin":33109,"ĠPatel":33110,"Num":33111,"ĠTAM":33112,"Ġsurn":33113,"ĠReloaded":33114,"Ġcoined":33115,"\"],":33116,"ĠClash":33117,"ĠAgu":33118,"Ġpragmatic":33119,"ĠActivate":33120,"Ġ802":33121,"Ġtrailers":33122,"Ġsilhou":33123,"Ġprobes":33124,"Ġcircus":33125,"ĠBain":33126,"ĠLindsay":33127,"ĠAbbey":33128,"Delivery":33129,"Ġconcession":33130,"Ġgastro":33131,"ĠSprite":33132,"ÄŁ":33133,"andel":33134,"Ġgimm":33135,"Ġautobi":33136,"ĠTurtle":33137,"Ġwonderfully":33138,"ĠHaram":33139,"ĠWorldwide":33140,"ĠHandle":33141,"Ġtheorists":33142,"Ġsleek":33143,"ĠZhu":33144,"ographically":33145,"EGA":33146,"ĠOwners":33147,"aths":33148,"ĠAntarctic":33149,"natal":33150,"=\"\"":33151,"flags":33152,"````":33153,"Ġsul":33154,"Kh":33155,"Ġpotassium":33156,"Ġlineman":33157,"Ġcereal":33158,"ĠSeasons":33159,"Ġ2022":33160,"Ġmathematic":33161,"Ġastronomers":33162,"professional":33163,"Ġfares":33164,"cknowled":33165,"Ġchi":33166,"Ġyoungsters":33167,"Ġmistakenly":33168,"Ġhemisphere":33169,"ĠDivinity":33170,"rone":33171,"Ġ\",":33172,"rings":33173,"Ġattracts":33174,"vana":33175,"å¹":33176,"CAP":33177,"Ġplaylist":33178,"Ġporch":33179,"ãģ£":33180,"Ġincorporates":33181,"Ġsoak":33182,"Ġasserting":33183,"ĠTerrorism":33184,"ĠPablo":33185,"Ja":33186,"cester":33187,"Ġfearing":33188,"ĠPrayer":33189,"Ġescalated":33190,"GW":33191,"Ġrobe":33192,"ĠBrighton":33193,"acists":33194,"ĠSymphony":33195,"ĠDwarf":33196,"ĠParade":33197,"ĠLego":33198,"Ġinexpl":33199,"Ġlords":33200,"leaf":33201,"RAG":33202,"liber":33203,"Ġcigars":33204,"ĠJehovah":33205,"606":33206,"WINDOWS":33207,"ĠLiberia":33208,"ebus":33209,"Heavy":33210,"Ġlubric":33211,"ĠRW":33212,"an
guages":33213,"Ġnarrowed":33214,"computer":33215,"ĠEmber":33216,"Ġmurdering":33217,"Ġdownstream":33218,"ĠTuls":33219,"ĠTables":33220,"Topic":33221,"ĠAccuracy":33222,"=/":33223,"lost":33224,"ĠRei":33225,"Ġprogresses":33226,"bear":33227,"Ġestablishments":33228,"Justin":33229,"ĠPeach":33230,"ĠGomez":33231,"å¿":33232,"ĠTriangle":33233,"Ident":33234,"ĠHive":33235,"Resources":33236,"Ġmixes":33237,"ĠAssuming":33238,"Mu":33239,"Ġhypoc":33240,"Ġsane":33241,"ĠWan":33242,"idious":33243,"Success":33244,"Ġio":33245,"Angel":33246,"Ġdangerously":33247,"ĠCreature":33248,"WORK":33249,":[":33250,"ĠKatrina":33251,"Listener":33252,"Miller":33253,"ĠIdlib":33254,"hang":33255,"Ġcircumvent":33256,"href":33257,"Ġcelestial":33258,"ĠWeeks":33259,"ĠPug":33260,"ĠDalton":33261,"Ġsubpoena":33262,"uku":33263,"Ġpersisted":33264,"pei":33265,"olding":33266,"ĠDocuments":33267,"ĠHast":33268,"ĠCENT":33269,"Ġprimer":33270,"Ġsynonymous":33271,"Ġnib":33272,"ombs":33273,"Ġnotation":33274,"ĠDish":33275,"ĠAtmosp":33276,"Ġforbid":33277,"ĠANG":33278,"pattern":33279,"los":33280,"Ġprojectiles":33281,"brown":33282,".\",":33283,"ĠVenom":33284,"Ġfiercely":33285,"ublished":33286,"ĠUran":33287,"ĠNicarag":33288,"410":33289,"ĠCAL":33290,"OTOS":33291,"ĠMiracle":33292,"ĠEnchant":33293,"Ġguarding":33294,"append":33295,"Attach":33296,"Ġleveled":33297,"Ġcondoms":33298,"ihilation":33299,"649":33300,"Ġnightmares":33301,"ĠTHEY":33302,"ĠSTART":33303,"ĠKinn":33304,"Ġroommate":33305,"Ġhygiene":33306,"opping":33307,"Job":33308,"Ġlvl":33309,"ĠVER":33310,"ĠKeeping":33311,"abetic":33312,"Ġformatting":33313,"erala":33314,"Ġrevisions":33315,"Ġresurg":33316,"Tel":33317,"ĠGoodman":33318,"353":33319,"pod":33320,"Ġindisp":33321,"ĠTranslation":33322,"Ġgown":33323,"ĠMund":33324,"Ġcis":33325,"Ġbystand":33326,"collect":33327,"ĠPunjab":33328,"actively":33329,"ĠGamb":33330,"tell":33331,"Ġimporting":33332,"gencies":33333,"Ġlocom":33334,"ĠBrill":33335,"Holy":33336,"ĠBerger":33337,"Ġshowdown":33338,"Ġresponders":33339,"ILY":33340,"Ġtakedown":33341,"leted":33342,"Ġmattered":33343,"Ġpredictive":33344,"Ġoverlay":33345,"GPU":33346,"ĠVick":33347,"Ġconveyed":33348,"Tab":33349,"peer":33350,"Scan":33351,"Ġdefensively":33352,"vae":33353,"Ġapproving":33354,"Ġtiers":33355,"ĠVia":33356,"querade":33357,"ĠSaudis":33358,"Ġdemolished":33359,"ĠProphe":33360,"Ġmono":33361,"Ġhospitality":33362,"HAM":33363,"ĠAriel":33364,"MOD":33365,"ĠTorah":33366,"Ġblah":33367,"ĠBelarus":33368,"erential":33369,"ĠTuc":33370,"Ġbanker":33371,"397":33372,"Ġmosquit":33373,"ĠScientist":33374,"ĠMusical":33375,"Ġhust":33376,"Shift":33377,"Ġtorment":33378,"Ġstandoff":33379,"Educ":33380,"ĠFog":33381,"Ġamplifier":33382,"Shape":33383,"Instance":33384,"ĠCritics":33385,"Ġdaemon":33386,"Houston":33387,"Ġmattress":33388,"ĠIDF":33389,"Ġobscene":33390,"ĠAmer":33391,"hetti":33392,"Ġcompiling":33393,"352":33394,"verett":33395,"ĠReduction":33396,"istration":33397,"ĠBlessed":33398,"ĠBachelor":33399,"316":33400,"Ġprank":33401,"ĠVulcan":33402,"dding":33403,"Ġmourning":33404,"ĠQuint":33405,"ĠBlaster":33406,"testing":33407,"Ġsediment":33408,">>>":33409,"ĠEternity":33410,"ĠWHERE":33411,"ĠMaze":33412,"Ġreacting":33413,"ĠAlv":33414,"omsday":33415,"ĠCRA":33416,"Ġtranslator":33417,"Ġbogus":33418,"atu":33419,"Website":33420,"olls":33421,"Ġbaptism":33422,"Ġsibling":33423,"ĠAutumn":33424,"vez":33425,"ãģ®é":33426,"guards":33427,"Georg":33428,"assadors":33429,"ĠFreud":33430,"Ġcontinents":33431,"ĠRegistry":33432,"Bernie":33433,"ĸļ士":33434,"Ġtolerant":33435,"ĠUW":33436,"Ġhorribly":33437,"995":33438,"ĠMIDI":33439,"Ġimpatient":33440,"ocado":33441,"
eri":33442,"ĠWorst":33443,"ĠNorris":33444,"ĠTalking":33445,"Ġdefends":33446,"ensable":33447,"Ġ2021":33448,"Ġanatomy":33449,"Lew":33450,"Ġdrawer":33451,"ĠCanberra":33452,"Ġpatriotic":33453,"é¾įåĸļ士":33454,"ĠAvg":33455,"ARM":33456,"Ġundisclosed":33457,"Ġfarewell":33458,"459":33459,"bable":33460,"ĠAllison":33461,"OLOG":33462,"Ġconco":33463,"tight":33464,"ĠACPI":33465,"ĠMines":33466,"lich":33467,"ĠâĶľ":33468,"represented":33469,"200000":33470,"Ġenthusiast":33471,"OTS":33472,"bil":33473,"ĠIngredients":33474,"Ġinventor":33475,"ĠMySQL":33476,"³³³":33477,"ĠABOUT":33478,"within":33479,"Ġmk":33480,"Bul":33481,"ĠFake":33482,"Ġdraconian":33483,"Wa":33484,"helm":33485,"ĠTerran":33486,"erville":33487,"Ġcommonplace":33488,"SIZE":33489,"Ġ\"<":33490,"replace":33491,"ographs":33492,"ĠSELECT":33493,"incible":33494,"ĠMostly":33495,"ĠSheffield":33496,"ĠIDE":33497,"uggle":33498,"Ġcitations":33499,"hurst":33500,"ĠUnix":33501,"Ġunleash":33502,"ĠPiper":33503,"ĠNano":33504,"Ġsuccumb":33505,"Ġreluctance":33506,"Ġ2500":33507,"ĠMerchant":33508,"Ġwiret":33509,"Ġcombos":33510,"ĠBirthday":33511,"Ġcharcoal":33512,"ĠUPS":33513,"ĠFairfax":33514,"Ġdriveway":33515,"ĠTek":33516,"ĠPitch":33517,"overe":33518,"Ġtechnicians":33519,"ĠActual":33520,"flation":33521,"ĠFiscal":33522,"ĠEmpty":33523,"anamo":33524,"Ġmagnesium":33525,"Ġslut":33526,"Ġgrowers":33527,"Investigators":33528,"():":33529,"ĠSatellite":33530,"ĠKeynes":33531,"missive":33532,"lane":33533,"Ġborough":33534,"344":33535,"ĠTEAM":33536,"ĠBethesda":33537,"CV":33538,"hower":33539,"ĠRAD":33540,"Ġchant":33541,"ĠRiy":33542,"Ġcompositions":33543,"Ġmildly":33544,"Ġmeddling":33545,"Ġagility":33546,"aneers":33547,"501":33548,"Ġsynth":33549,"linger":33550,"291":33551,"Ġexclaimed":33552,"Party":33553,"Ġcontamin":33554,"ĠManor":33555,"ĠRespond":33556,"Ġpraising":33557,"Ġmanners":33558,"fleet":33559,"Summer":33560,"ĠLynd":33561,"ĠDefinitely":33562,"grim":33563,"Ġbowling":33564,"stri":33565,"çĽ":33566,"ynt":33567,"Ġmandates":33568,"DIV":33569,"Ġreconcile":33570,"views":33571,"ĠDamon":33572,"vette":33573,"Flo":33574,"ĠGreatest":33575,"ilon":33576,"icia":33577,"Ġportrayal":33578,"Ġcushion":33579,"504":33580,"1979":33581,"ossal":33582,"Applic":33583,"scription":33584,"Ġmitigation":33585,"ATS":33586,"pac":33587,"Ġerased":33588,"Ġdeficiencies":33589,"ĠHollande":33590,"ĠXu":33591,"Ġbred":33592,"Ġpregnancies":33593,"femin":33594,"Ġemph":33595,"Ġplanners":33596,"Ġoutper":33597,"uttering":33598,"Ġperpetrator":33599,"Ġmotto":33600,"ĠEllison":33601,"ĠNEVER":33602,"Ġadmittedly":33603,"ARI":33604,"ĠAzerbaijan":33605,"Ġmillisec":33606,"Ġcombustion":33607,"ĠBottle":33608,"ĠLund":33609,"ĠPs":33610,"ĠDress":33611,"Ġfabricated":33612,"Ġbattered":33613,"Ġsidel":33614,"ĠNotting":33615,"Foreign":33616,"ĠJerome":33617,"020":33618,"ĠArbit":33619,"Ġknots":33620,"ĠRIGHT":33621,"Moving":33622,"ãģĻ":33623,"Ġsurgeries":33624,"Ġcourthouse":33625,"Ġmastered":33626,"Ġhovering":33627,"ĠBran":33628,"ĠAlison":33629,"Ġsafest":33630,"military":33631,"Ġbullied":33632,"Ġbarrage":33633,"Reader":33634,"ESE":33635,"ĠGeographic":33636,"Tools":33637,"314":33638,"ĠGeek":33639,"roth":33640,"glers":33641,"ĠFIN":33642,"Ïģ":33643,"ĠAston":33644,"altern":33645,"488":33646,"Ġveterin":33647,"Gamer":33648,"Ġintel":33649,"renches":33650,"Shield":33651,"Ġamnesty":33652,"ĠBhar":33653,"Ġpiled":33654,"Ġhonorable":33655,"ĠInstitutes":33656,"Ġsoaked":33657,"Ġcoma":33658,"ĠEFF":33659,"341":33660,"bytes":33661,"ĠGmail":33662,"lein":33663,"ĠCanadiens":33664,"material":33665,"Il":33666,"Ġinstructors":33667,"ĠKY":33668,"Ġconceive":33669,"ubb":33670,"
ĠPossible":33671,"Ġeasing":33672,"ĠChristina":33673,"Ġcaric":33674,"ĠHDR":33675,"ROM":33676,"Ġshovel":33677,"delete":33678,"Ġpuff":33679,"ĠChanging":33680,"Ġseamlessly":33681,"Attribute":33682,"Ġacquisitions":33683,"akery":33684,"ĠEF":33685,"Ġautistic":33686,"ĠTakes":33687,"ĠPowder":33688,"ĠStir":33689,"510":33690,"ĠBubble":33691,"settings":33692,"ĠFowler":33693,"Ġmustard":33694,"Ġmoreover":33695,"Ġcopyrighted":33696,"ĠLEDs":33697,"1500":33698,"æī":33699,"ĠHIS":33700,"enf":33701,"Ġcustod":33702,"ĠHuck":33703,"Gi":33704,"Ġimg":33705,"Answer":33706,"Ct":33707,"jay":33708,"ĠInfrastructure":33709,"Ġfederally":33710,"Loc":33711,"Ġmicrobes":33712,"Ġoverrun":33713,"dds":33714,"otent":33715,"adiator":33716,">>>>>>>>":33717,"Ġtornado":33718,"Ġadjud":33719,"Ġintrigued":33720,"Ġsi":33721,"ĠRevelation":33722,"progress":33723,"Ġburglary":33724,"ĠSaiyan":33725,"ĠKathy":33726,"Ġserpent":33727,"ĠAndreas":33728,"Ġcompel":33729,"essler":33730,"ĠPlastic":33731,"ĠAdvent":33732,"ĠPositive":33733,"ĠQt":33734,"ĠHindus":33735,"registered":33736,"ularity":33737,"Ġrighteousness":33738,"Ġdemonic":33739,"uitive":33740,"ĠBDS":33741,"ĠGregg":33742,"cia":33743,"ĠCrusade":33744,"ĠSinai":33745,"WARE":33746,"+(":33747,"Ġmell":33748,"Ġderail":33749,"yards":33750,"Ast":33751,"Ġnoticeably":33752,"ĠOber":33753,"Ram":33754,"Ġunnoticed":33755,"Ġseq":33756,"avage":33757,"Ts":33758,"Ġ640":33759,"Ġconcede":33760,"Ġ])":33761,"Fill":33762,"Ġcaptivity":33763,"ĠImprovement":33764,"ĠCrusader":33765,"araoh":33766,"MAP":33767,"æĹ":33768,"Ġstride":33769,"always":33770,"Fly":33771,"Nit":33772,"Ġalgae":33773,"ĠCooking":33774,"ĠDoors":33775,"Malley":33776,"Ġpolicemen":33777,"ãģį":33778,"Ġastronaut":33779,"accessible":33780,"495":33781,"ĠRAW":33782,"cliffe":33783,"udicrous":33784,"Ġdepended":33785,"alach":33786,"Ġventures":33787,"rake":33788,"Ġtits":33789,"ĠHou":33790,"Ġcondom":33791,"ormonal":33792,"Ġindent":33793,"Ġuploading":33794,"Footnote":33795,"Important":33796,"Ġ271":33797,"Ġmindful":33798,"Ġcontends":33799,"Cra":33800,"Ġcalibr":33801,"ĠOECD":33802,"plugin":33803,"Fat":33804,"ĠISS":33805,"ĠDynamics":33806,"ansen":33807,"686":33808,"'),":33809,"Ġsprite":33810,"Ġhandheld":33811,"ĠHipp":33812,"=~=~":33813,"Trust":33814,"Ġsemantics":33815,"ĠBundes":33816,"ĠReno":33817,"ĠLiterature":33818,"sense":33819,"Gary":33820,"ĠAeg":33821,"ĠTrin":33822,"EEK":33823,"Ġcleric":33824,"ĠSSH":33825,"Ġchrist":33826,"Ġinvading":33827,"ibu":33828,"Ġenum":33829,"aura":33830,"Ġallege":33831,"ĠIncredible":33832,"BBC":33833,"Ġthru":33834,"Ġsailed":33835,"Ġemulate":33836,"Ġinsecurity":33837,"Ġcrou":33838,"Ġaccommodations":33839,"Ġincompetent":33840,"Ġslips":33841,"ĠEarthqu":33842,"sama":33843,"ILLE":33844,"ĠiPhones":33845,"asaki":33846,"Ġbye":33847,"Ġard":33848,"Ġextras":33849,"Ġslaughtered":33850,"Ġcrowdfunding":33851,"resso":33852,"Ġfilib":33853,"ĠERROR":33854,"ĠTLS":33855,"egg":33856,"ĠItal":33857,"Ġenlist":33858,"ĠCatalonia":33859,"ĠScots":33860,"Ġsergeant":33861,"Ġdissolve":33862,"NH":33863,"Ġstandings":33864,"rique":33865,"IQ":33866,"Ġbeneficiary":33867,"Ġaquarium":33868,"YouTube":33869,"ĠPowerShell":33870,"Ġbrightest":33871,"ĠWarrant":33872,"Sold":33873,"Writing":33874,"Ġbeginnings":33875,"ĠReserved":33876,"ĠLatinos":33877,"heading":33878,"Ġ440":33879,"Ġrooftop":33880,"ATING":33881,"Ġ390":33882,"VPN":33883,"Gs":33884,"kernel":33885,"turned":33886,"Ġpreferable":33887,"Ġturnovers":33888,"ĠHels":33889,"Sa":33890,"ĠShinji":33891,"veh":33892,"ĠMODULE":33893,"Viol":33894,"Ġexiting":33895,"Ġjab":33896,"ĠVanilla":33897,"Ġacron":33898,"ĠGap":33899,"bern":33900,"Ak":33
901,"ĠMcGu":33902,"Ġendlessly":33903,"ĠFarage":33904,"ĠNoel":33905,"Va":33906,"MK":33907,"Ġbrute":33908,"ĠKru":33909,"ĠESV":33910,"ĠOlivia":33911,"âĢł":33912,"ĠKaf":33913,"Ġtrusting":33914,"Ġhots":33915,"324":33916,"Ġmalaria":33917,"Ġjson":33918,"Ġpounding":33919,"ortment":33920,"Country":33921,"Ġpostponed":33922,"Ġunequiv":33923,"?),":33924,"ĠRooney":33925,"udding":33926,"ĠLeap":33927,"urrence":33928,"shapeshifter":33929,"ĠHAS":33930,"osate":33931,"Ġcavern":33932,"Ġconservatism":33933,"ĠBAD":33934,"Ġmileage":33935,"Ġarresting":33936,"Vaults":33937,"Ġmixer":33938,"Democratic":33939,"ĠBenson":33940,"Ġauthored":33941,"8000":33942,"Ġproactive":33943,"ĠSpiritual":33944,"tre":33945,"Ġincarcerated":33946,"ĠSort":33947,"Ġpeaked":33948,"Ġwielding":33949,"reciation":33950,"×Ļ×":33951,"Patch":33952,"ĠEmmy":33953,"Ġexqu":33954,"tto":33955,"ĠRatio":33956,"ĠPicks":33957,"ĠGry":33958,"phant":33959,"Ġfret":33960,"Ġethn":33961,"Ġarchived":33962,"%-":33963,"cases":33964,"ĠBlaze":33965,"Ġimb":33966,"cv":33967,"yss":33968,"imony":33969,"Ġcountdown":33970,"Ġawakening":33971,"ĠTunisia":33972,"ĠRefer":33973,"ĠMJ":33974,"Ġunnatural":33975,"ĠCarnegie":33976,"izen":33977,"ĠNuggets":33978,"hess":33979,"Ġevils":33980,"647":33981,"Ġintroductory":33982,"loving":33983,"ĠMcMahon":33984,"Ġambiguity":33985,"Label":33986,"ĠAlmighty":33987,"Ġcoloring":33988,"ĠClaus":33989,"setting":33990,"NULL":33991,"ĠFavorite":33992,"ĠSIG":33993,">(":33994,"ĠShiva":33995,"ĠMayer":33996,"Ġstormed":33997,"ĠCoverage":33998,"weapons":33999,"igham":34000,"Ġunanswered":34001,"Ġleve":34002,"Ġcoy":34003,"cas":34004,"bags":34005,"asured":34006,"Seattle":34007,"ĠSantorum":34008,"serious":34009,"Ġcourageous":34010,"ĠSoup":34011,"Ġconfiscated":34012,"Ġ///":34013,"Ġunconventional":34014,"Ġmoms":34015,"ĠRohingya":34016,"ĠOrchestra":34017,"ĠPotion":34018,"Ġdiscredit":34019,"ĠFIL":34020,"fixed":34021,"ĠDeer":34022,"doi":34023,"ĠDimension":34024,"Ġbureaucrats":34025,"eteen":34026,"ĠactionGroup":34027,"ohm":34028,"Ġbumps":34029,"ĠUtility":34030,"Ġsubmarines":34031,"renheit":34032,"research":34033,"ĠShapiro":34034,"Ġsketches":34035,"Ġdeceptive":34036,"ĠVil":34037,"esame":34038,"ĠEssentially":34039,"Ġrampage":34040,"isky":34041,"Ġmuttered":34042,"thritis":34043,"Ġ236":34044,"fet":34045,"bars":34046,"Ġpupil":34047,"ĠThou":34048,"oS":34049,"song":34050,"Ġfractured":34051,"Ġrevert":34052,"picture":34053,"Ġcriterion":34054,"usher":34055,"Ġrepercussions":34056,"ĠVintage":34057,"ĠSuperintendent":34058,"Officers":34059,"Ġflagged":34060,"Ġblames":34061,"Ġinverse":34062,"ographers":34063,"Ġmakeshift":34064,"Ġdevoid":34065,"Ġfossils":34066,"ĠAristotle":34067,"ĠFunds":34068,"Ġdepleted":34069,"ĠFlu":34070,"ĠYuan":34071,"Ġwoes":34072,"Ġlipid":34073,"Ġsitu":34074,"requisites":34075,"Ġfurnish":34076,"ĠSamar":34077,"Ġshameful":34078,"Ġadversely":34079,"Ġadept":34080,"Ġremorse":34081,"Ġmurderous":34082,"uckles":34083,"ĠESL":34084,"Ġ314":34085,"sent":34086,"Ġredef":34087,"ĠCache":34088,"ĠPurs":34089,"igans":34090,"Ġ460":34091,"Ġprescriptions":34092,"Ġfres":34093,"Fuck":34094,"ocrates":34095,"Twenty":34096,"ĠWeird":34097,"ĠToggle":34098,"ĠCalled":34099,"itizens":34100,"Ġpoultry":34101,"Ġharvesting":34102,"ãĤ¦ãĤ¹":34103,"Bottom":34104,"Ġcautioned":34105,"tn":34106,"396":34107,"ĠNikki":34108,"Ġevaluations":34109,"Ġharassing":34110,"Ġbindings":34111,"ĠMonetary":34112,"Ġhitters":34113,"Ġadversary":34114,"unts":34115,"Ġsetback":34116,"Ġencrypt":34117,"ĠCait":34118,"Ġlows":34119,"enges":34120,"ĠNorn":34121,"Ġbulbs":34122,"Ġbottled":34123,"ĠVoyager":34124,"317":34125,"Ġspheres":34126,
"politics":34127,"Ġsubtract":34128,"Ġsensations":34129,"Ġappalling":34130,"Ġ316":34131,"Ġenvironmentally":34132,"ĠSTEM":34133,"Ġpublishes":34134,"560":34135,"Ġdiligence":34136,"484":34137,"Ġadvises":34138,"Ġpetrol":34139,"Ġimagining":34140,"Ġpatrols":34141,"ĠInteger":34142,"ĠAshes":34143,"actus":34144,"ĠRadiant":34145,"ĠLT":34146,"itability":34147,"htaking":34148,"Setting":34149,"Ġnuanced":34150,"ĠReef":34151,"ĠDevelopers":34152,"Ni":34153,"pieces":34154,"990":34155,"License":34156,"Ġlowers":34157,"ĠOttoman":34158,"327":34159,"ooo":34160,"Ġquitting":34161,"markets":34162,"Behind":34163,"Ġbasin":34164,"Ġdocs":34165,"anie":34166,"flash":34167,"ctl":34168,"Ġcivilized":34169,"ĠFukushima":34170,"\"],\"":34171,"ĠKS":34172,"ĠHonestly":34173,"arat":34174,"Ġconstructs":34175,"ĠLans":34176,"ĠDire":34177,"ĠLIKE":34178,"ĠTrouble":34179,"Ġwithholding":34180,"ĠOblivion":34181,"Ġsanity":34182,"anya":34183,"Const":34184,"Ġgrocer":34185,"ĠCelsius":34186,"Ġrecounted":34187,"ĠWife":34188,"Border":34189,"atered":34190,"happy":34191,"Ġspoiler":34192,"Ġlogically":34193,"Hall":34194,"Ġsucceeding":34195,"Ġpolymorph":34196,"Ġaxes":34197,"ĠShotgun":34198,"ĠSlim":34199,"ĠPrinciples":34200,"ĠLeth":34201,"arta":34202,"Ġscor":34203,"Screenshot":34204,"Ġrelaxation":34205,"#$#$":34206,"Ġdeterrent":34207,"iddy":34208,"Ġpowerless":34209,"Ġlesbians":34210,"Ġchords":34211,"ĠEdited":34212,"selected":34213,"Ġseparatists":34214,"0002":34215,"Ġairspace":34216,"Ġturnaround":34217,"Ġcunning":34218,"PATH":34219,"Poly":34220,"Ġbombed":34221,"Ġtion":34222,"xs":34223,"Ġwithhold":34224,"Ġwaged":34225,"ĠLiberties":34226,"Flag":34227,"Ġcomforting":34228,"454":34229,"ĠIris":34230,"arers":34231,"Ġrag":34232,"Ġrelocated":34233,"ĠGuarant":34234,"Ġstrategically":34235,"Ġgamma":34236,"uberty":34237,"ĠLockheed":34238,"gres":34239,"Ġgrilled":34240,"ĠLowe":34241,"stats":34242,"ĠRocks":34243,"Ġsensing":34244,"Ġrenting":34245,"ĠGeological":34246,"اØ":34247,"otrop":34248,"Ġsew":34249,"Ġimproperly":34250,"486":34251,"Ġâĸł":34252,"Ġstarving":34253,"ĠBj":34254,"Discussion":34255,"328":34256,"ĠCombo":34257,"ĠFixes":34258,"NAT":34259,"Ġstriving":34260,"thora":34261,"Ġharvested":34262,"ĠPing":34263,"Ġplayful":34264,"Ġavenues":34265,"Ġoccupational":34266,"Ġwakes":34267,"ĠCourier":34268,"Ġdrummer":34269,"ĠBrowser":34270,"ĠHouth":34271,"itu":34272,"Ġapparel":34273,"paste":34274,"Ġhunted":34275,"ĠSecondly":34276,"lain":34277,"XY":34278,"ĠPIN":34279,"icons":34280,"Ġcocktails":34281,"Ġsizable":34282,"Ġhurdles":34283,"estinal":34284,"ĠRecreation":34285,"Ġeco":34286,"648":34287,"ĠDied":34288,"mint":34289,"Ġfingerprints":34290,"Ġdispose":34291,"ĠBosnia":34292,"tsy":34293,"2200":34294,"Ġinspected":34295,"ĠFou":34296,"Ġfuss":34297,"Ġambush":34298,"ĠRak":34299,"Ġmanifested":34300,"Prosecut":34301,"Ġsuffice":34302,"rences":34303,"Ġcompensated":34304,"ĠCyrus":34305,"Ġgenus":34306,"ĠWolverine":34307,"ĠTrends":34308,"Ġhikes":34309,"ĠSeen":34310,"Ġenrol":34311,"Cold":34312,"Ġpolitely":34313,"ĠSlav":34314,"ĠRupert":34315,"Ġeyewitness":34316,"ĠAlto":34317,"Ġuncomp":34318,"Ġposterior":34319,"Must":34320,"ĠHerz":34321,"Ġprogressively":34322,"Ġ234":34323,"Ġindifference":34324,"ĠCunningham":34325,"Ġacademia":34326,"Ġsewer":34327,"Ġastounding":34328,"ĠAES":34329,"rather":34330,"Ġeldest":34331,"Ġclimbs":34332,"ĠAdds":34333,"Ġoutcry":34334,"Ġcontag":34335,"ĠHouses":34336,"Ġpept":34337,"ĠMelania":34338,"interested":34339,"ĠUCH":34340,"ĠRoots":34341,"ĠHubbard":34342,"ĠTBD":34343,"ĠRomanian":34344,"filename":34345,"Stone":34346,"ĠImpl":34347,"Ġchromosome":34348,"Cle":34349,"dx":34350,
"Ġscrambled":34351,"ĠPt":34352,"Ġ242":34353,"OPLE":34354,"Ġtremendously":34355,"Street":34356,"Ġcraving":34357,"Ġbundled":34358,"ĠRG":34359,"pipe":34360,"Ġinjuring":34361,"Ġarcane":34362,"Particip":34363,"ĠHeroic":34364,"sty":34365,"Ġtopping":34366,"ĠTempest":34367,"rentices":34368,"bh":34369,"Ġparanoia":34370,"ĠUnicode":34371,"Ġegregious":34372,"Ġ\\'":34373,"ĠOswald":34374,"Ġgravel":34375,"ĠSimpsons":34376,"Ġbland":34377,"ĠGuantanamo":34378,"Writer":34379,"liners":34380,"ĠDice":34381,"JC":34382,"Ġparity":34383,"Ġsided":34384,"Ġ237":34385,"ĠPyrrha":34386,"atters":34387,"dk":34388,"Fine":34389,"compan":34390,"Ġformulated":34391,"ĠIdol":34392,"ilers":34393,"hemoth":34394,"ĠFav":34395,"Ġintrusion":34396,"Ġcarrots":34397,"ĠLayer":34398,"ĠHacker":34399,"Ġ----------------":34400,"Ġmoderation":34401,"éģ":34402,"ococ":34403,"Ġcharacterize":34404,"ĠTeresa":34405,"Ġsocioeconomic":34406,"Ġperk":34407,"ĠParticipation":34408,"training":34409,"ĠPaulo":34410,"phys":34411,"Ġtrustworthy":34412,"Ġembodied":34413,"ĠMerch":34414,"currency":34415,"ĠPriority":34416,"Ġteasing":34417,"Ġabsorbing":34418,"Ġunfinished":34419,"ĠComparison":34420,"Ġdisple":34421,"writers":34422,"Ġprofessions":34423,"ĠPenguin":34424,"Ġangrily":34425,"ĠLINK":34426,"688":34427,"ĠCorrespond":34428,"Ġprevailed":34429,"Ġcartel":34430,"lp":34431,"asms":34432,"ĠRedemption":34433,"ĠIslamists":34434,"effects":34435,"dose":34436,"ĠLatter":34437,"ĠHalifax":34438,"Ġvas":34439,"ĠTopics":34440,"ĠNamed":34441,"advertising":34442,"zza":34443,"ICES":34444,"Ġretarded":34445,"achable":34446,"ĠPuppet":34447,"ĠItemLevel":34448,"Ġretract":34449,"Ġidentifiable":34450,"Aaron":34451,"ĠBuster":34452,"sol":34453,"helle":34454,"assemb":34455,"Hope":34456,"ranged":34457,"Ba":34458,"ĠPurch":34459,"éĢ":34460,"ĠSiri":34461,"Ġarrivals":34462,"Ġ1912":34463,"Ġshortened":34464,"Ġ312":34465,"Ġdiscrepancy":34466,"ĠTemperature":34467,"ĠWalton":34468,"Ġkinderg":34469,"polit":34470,"Ġremix":34471,"Ġconnectors":34472,"ãĥĺãĥ©":34473,"ĠKazakhstan":34474,"dominated":34475,"Ġsugars":34476,"imble":34477,"ĠPanic":34478,"ĠDemand":34479,"ĠColony":34480,"onen":34481,"ĠMER":34482,"775":34483,"uria":34484,"azaar":34485,"ĠDegree":34486,"Pri":34487,"Ġsunshine":34488,"Ġ251":34489,"Ġpsychedelic":34490,"Ġdigitally":34491,"ĠBraun":34492,"Ġshimmer":34493,"Ġshave":34494,"ĠTelesc":34495,"ĠAstral":34496,"ĠVenezuelan":34497,"ĠOG":34498,"Ġcrawling":34499,"Integ":34500,"ĠFeather":34501,"Ġunfolding":34502,"Ġappropriation":34503,"Ġè£ıè":34504,"ĠMobility":34505,"ĠNey":34506,"-.":34507,"bilt":34508,"LIN":34509,"ĠTube":34510,"ĠConversely":34511,"Ġkeyboards":34512,"ĠCao":34513,"Ġoverth":34514,"Ġlaure":34515,">>\\":34516,"ĠViper":34517,"acha":34518,"Offset":34519,"ĠRaleigh":34520,"ĠJae":34521,"Jordan":34522,"jp":34523,"Ġtotalitarian":34524,"Connector":34525,"Ġobserves":34526,"ĠSpartan":34527,"ĠImmediately":34528,"ĠScal":34529,"Cool":34530,"Ġtaps":34531,"Ġroar":34532,"Past":34533,"Ġchars":34534,"ĠBender":34535,"ĠSheldon":34536,"Ġpainter":34537,"Ġbeacon":34538,"ĠCreatures":34539,"Ġdownturn":34540,"Ġhinder":34541,"ĠAndromeda":34542,"ÃĽ":34543,"ccoli":34544,"ĠFitness":34545,"etrical":34546,"Ġutilizes":34547,"Ġsenate":34548,"Ġensemble":34549,"Ġcheers":34550,"TW":34551,"Ġaffluent":34552,"kil":34553,"rylic":34554,"ordering":34555,"Computer":34556,"Ġgruesome":34557,"ostics":34558,"ĠUbisoft":34559,"ĠKelley":34560,"Ġwrench":34561,"Ġbourgeoisie":34562,"IBLE":34563,"ĠPreston":34564,"worn":34565,"arist":34566,"reating":34567,"Ġstained":34568,"arine":34569,"Ġslime":34570,"ENN":34571,"Ġchests":34572,"Ġgroundwater":34573,"a
nnot":34574,"ĠTray":34575,"ĠLocke":34576,"ĠCTR":34577,"Ġdudes":34578,"ĠExternal":34579,"ĠDecoder":34580,"Ġparamed":34581,"ĠMedline":34582,"809":34583,"ĠDinner":34584,"rupal":34585,"gz":34586,"ĠGum":34587,"ĠDemo":34588,"jee":34589,"Ġdh":34590,"berman":34591,"archs":34592,"Ġenqu":34593,"ĠEpstein":34594,"Ġdevastation":34595,"Ġfriendships":34596,"ĠArd":34597,"Ġ231":34598,"ĠRubin":34599,"ĠDistance":34600,"Ġspurred":34601,"Ġdossier":34602,"Ġoverlooking":34603,"\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\":34604,"Forest":34605,"ĠComes":34606,"\\\",":34607,"ĠIranians":34608,"Ġfixtures":34609,"Laughs":34610,"Ġcurry":34611,"ĠKingston":34612,"Ġsquash":34613,"Ġcatalogue":34614,"Ġabnormalities":34615,"Ġdigestive":34616,".........":34617,"Ġsubordinate":34618,"ogly":34619,"Ġ249":34620,"Middle":34621,"Ġmassac":34622,"Ġburgers":34623,"Ġdownstairs":34624,"Ġ1931":34625,"394":34626,"ĠVG":34627,"Ġlasers":34628,"ĠSikh":34629,"ĠAlexa":34630,"derived":34631,"Ġcyclist":34632,"ãģ®éŃĶ":34633,"oneliness":34634,"!!!!!!!!":34635,"Ġbuffs":34636,"legate":34637,"Ġraping":34638,"Ġrecommending":34639,"rored":34640,"Ġmulticultural":34641,"unique":34642,"Ġbusinessmen":34643,"Ġuneasy":34644,"ĠMAP":34645,"Ġdispersed":34646,"cipline":34647,"Jess":34648,"ĠKerala":34649,"å§":34650,"Ġabstraction":34651,"Surv":34652,"Uh":34653,"Ġprinters":34654,"ija":34655,"owder":34656,"Ġanalogous":34657,"ĠASP":34658,"afer":34659,"Ġunfolded":34660,"Ġleveling":34661,"Ġbreached":34662,"ĠHearing":34663,"Ġnat":34664,"Ġtranslating":34665,"critical":34666,"Ġantagonist":34667,"ĠYesterday":34668,"Ġfuzzy":34669,"wash":34670,"mere":34671,"Ġbewild":34672,"ĠMae":34673,"Virgin":34674,"phrase":34675,"Ġsignaled":34676,"ĠHIGH":34677,"Ġprotester":34678,"Ġgarner":34679,"unknown":34680,"Ġkay":34681,"Ġabducted":34682,"Ġstalking":34683,"amn":34684,"Ġdeserving":34685,"ĠRiv":34686,"ĠJorge":34687,"Ġscratching":34688,"ĠSaving":34689,"iping":34690,"Ġtease":34691,"Ġmissionary":34692,"ĠMorrow":34693,"TIME":34694,"Present":34695,"Ġchemotherapy":34696,"terness":34697,"ĠHomes":34698,"ĠPurdue":34699,"Ġstaunch":34700,"ĠWhitney":34701,"ĠTHERE":34702,"μ":34703,"iatus":34704,"ĠErnest":34705,"ĠDeploy":34706,"Ġcoveted":34707,"FML":34708,"ĠDialogue":34709,"Ġexited":34710,"fruit":34711,"Ġnerd":34712,"\":\"\",\"":34713,"Ġvivo":34714,"ruly":34715,"460":34716,"ĠAmen":34717,"rehensible":34718,"Ġâĺ":34719,"DIR":34720,"Ġadherence":34721,"Ġchew":34722,"ĠCoke":34723,"ĠSergei":34724,"digital":34725,"ĠNeck":34726,"gently":34727,"enthal":34728,"/)":34729,"Ġweary":34730,"Ġguise":34731,"ĠConcord":34732,"ĠOnion":34733,"atcher":34734,"Ġbinge":34735,"ĠDirective":34736,"Ġmanned":34737,"ansk":34738,"Ġillusions":34739,"Ġbillionaires":34740,"383":34741,"olyn":34742,"odynamic":34743,"ĠWheat":34744,"ĠAlic":34745,"Ġcoloured":34746,"ĠNAFTA":34747,"abo":34748,"Ġmacros":34749,"independent":34750,"sweet":34751,"Ġspac":34752,"ĠKabul":34753,"ĠÄ":34754,"eme":34755,"Ġdictated":34756,"Ġshouts":34757,"={":34758,"Ġripping":34759,"ĠShay":34760,"ĠCricket":34761,"directed":34762,"Ġanalysed":34763,"ĠWARRANT":34764,"agons":34765,"ĠBlazers":34766,"Ġcheered":34767,"Ġarithmetic":34768,"ĠTanz":34769,"373":34770,"ĠFlags":34771,"Ġ295":34772,"Ġwitches":34773,"ĠIncluded":34774,"ĠGained":34775,"ĠBlades":34776,"Gam":34777,"ĠSamantha":34778,"ĠAtlantis":34779,"ĠPratt":34780,"Ġspoiled":34781,"ĠIB":34782,"ĠRamirez":34783,"Probably":34784,"rero":34785,"ĠNg":34786,"ĠWarlock":34787,"tp":34788,"Ġoverhe":34789,"Ġadministrations":34790,"Ġtint":34791,"Ġregiment":34792,"Ġpistols":34793,"Ġblankets":34794,"Ġepist":34795,"Ġbowls":34796,"Ġhydraulic":34797,"Ġdean":
34798,"Ġjung":34799,"Ġascend":34800,"705":34801,"ĠSantiago":34802,"î":34803,"Ġunavoid":34804,"ĠShaman":34805,"reb":34806,"Ġstemming":34807,"998":34808,"ĠMG":34809,"sticks":34810,"esthesia":34811,"ERO":34812,"Ġmorbid":34813,"ĠGrill":34814,"ĠPoe":34815,"anyl":34816,"Ġdeleting":34817,"ĠSurveillance":34818,"Ġdirectives":34819,"Ġiterations":34820,"ĠRox":34821,"ĠMilky":34822,"Father":34823,"Ġpatented":34824,"447":34825,"Ġprecursor":34826,"Ġmaiden":34827,"ĠPhen":34828,"ĠVegan":34829,"ĠPatent":34830,"Kelly":34831,"Redditor":34832,"Ġnods":34833,"Ġventilation":34834,"ĠSchwarz":34835,"Ġwizards":34836,"Ġominous":34837,"ĠHeads":34838,"ĠBG":34839,"Ġlumber":34840,"ĠSpiel":34841,"ĠisEnabled":34842,"Ġancestral":34843,"ĠShips":34844,"Ġwrestler":34845,"phi":34846,"Ġyuan":34847,"ĠRebellion":34848,"Ġiceberg":34849,"Ġmagically":34850,"Ġdiversion":34851,"arro":34852,"ythm":34853,"ĠRiders":34854,"ĠRobbie":34855,"ĠKara":34856,"ĠMaintenance":34857,"ĠHerb":34858,"Ġharms":34859,"packed":34860,"ĠFeinstein":34861,"Ġmarrying":34862,"Ġblending":34863,"ĠRates":34864,"Ġ1880":34865,"Ġwrink":34866,"ĠUnch":34867,"ĠTorch":34868,"described":34869,"Ġhumanoid":34870,"ilitating":34871,"ĠConv":34872,"ĠFeld":34873,"IGHTS":34874,"Ġwhistleblower":34875,"ortmund":34876,"etsy":34877,"arrett":34878,"ĠMono":34879,"ĠIke":34880,"ĠCNBC":34881,"ĠWAY":34882,"ĠMDMA":34883,"ĠIndividuals":34884,"Ġsupplemental":34885,"Ġpowerhouse":34886,"ĠStru":34887,"Focus":34888,"aphael":34889,"ĠColleg":34890,"atti":34891,"ZA":34892,"Ġperenn":34893,"ĠSignature":34894,"ĠRodney":34895,"Ġcubes":34896,"iddled":34897,"ĠDante":34898,"ĠINV":34899,"ilingual":34900,"ĠCth":34901,"Ġsofa":34902,"Ġintimidate":34903,"ĠRoe":34904,"ĠDiplom":34905,"ĠCountries":34906,"ayson":34907,"Ġextradition":34908,"Ġdisabling":34909,"ĠCardiff":34910,"Ġmemorandum":34911,"ĠTrace":34912,"Ġ???":34913,"sector":34914,"ĠRouhani":34915,"ĠYates":34916,"ĠFreeze":34917,"Ġbladder":34918,"Motor":34919,"ĠPromise":34920,"antasy":34921,"Ġforeseeable":34922,"ĠCologne":34923,"container":34924,"ĠTrees":34925,"ĠGors":34926,"ĠSinclair":34927,"Ġbarring":34928,"keye":34929,"Ġslashed":34930,"ĠStatistical":34931,"éĩ":34932,"Ġâĸº":34933,"Allows":34934,"Ġhumility":34935,"Ġdrilled":34936,"ĠFurn":34937,"443":34938,"Ġsewage":34939,"Ġhomepage":34940,"Ġcourtyard":34941,"Ġvile":34942,"Ġsubsidiaries":34943,"ajo":34944,"directory":34945,"Ġammon":34946,"Vers":34947,"charges":34948,"Ġ}}":34949,"ĠChains":34950,"Ġ246":34951,"nob":34952,"Ġpercept":34953,"Ġgrit":34954,"Ġfishermen":34955,"ĠIraqis":34956,"ĠDISTR":34957,"ĠFULL":34958,"ĠEvaluation":34959,"graph":34960,"atial":34961,"Ġcooperating":34962,"Ġmelan":34963,"Ġenlightened":34964,"Ġali":34965,"tailed":34966,"Ġsalute":34967,"Ġweakest":34968,"ĠBulldogs":34969,"UA":34970,"ĠAlloy":34971,"Ġsemen":34972,"ocene":34973,"ĠWilliamson":34974,"spr":34975,",âĢĶ":34976,"ĠGF":34977,"ittens":34978,"Beat":34979,"ĠJunk":34980,"iphate":34981,"ĠFarmers":34982,"ĠBitcoins":34983,"igers":34984,"dh":34985,"ĠLoyal":34986,"payer":34987,"Ġentertained":34988,"Ġpenned":34989,"Ġcoupon":34990,"Queue":34991,"Ġweakening":34992,"carry":34993,"Ġunderestimate":34994,"Ġshootout":34995,"Ġcharismatic":34996,"ĠProcedure":34997,"Ġprudent":34998,"inances":34999,"Ġriches":35000,"Ġcortical":35001,"Ġstrides":35002,"Ġdrib":35003,"ĠOilers":35004,"540":35005,"ĠPerform":35006,"ĠBangkok":35007,"Ġeuth":35008,"SER":35009,"Ġsimplistic":35010,"tops":35011,"campaign":35012,"Quality":35013,"Ġimpoverished":35014,"ĠEisenhower":35015,"Ġaugment":35016,"ĠHarden":35017,"Ġintervened":35018,"Ġlistens":35019,"ĠKok":35020,"Ġsage":35021,"Ġrubbis
h":35022,"ĠDed":35023,"Ġmull":35024,"pelling":35025,"Ġvideot":35026,"Production":35027,"DJ":35028,"miah":35029,"Ġadaptations":35030,"Ġmedically":35031,"Ġboarded":35032,"Ġarrogance":35033,"Ġscrapped":35034,"Ġoppress":35035,"FORMATION":35036,"Ġjunction":35037,"415":35038,"EEEE":35039,"Skill":35040,"Ġsubdu":35041,"ĠSuggest":35042,"ĠPett":35043,"Ġlett":35044,"ĠManip":35045,"ĠCaf":35046,"ĠCooperation":35047,"Ther":35048,"Ġregained":35049,"¶æ":35050,"reflect":35051,"Ġthugs":35052,"ĠShelby":35053,"Ġdictates":35054,"ĠWeiner":35055,"ĠHale":35056,"Ġbattleground":35057,"schild":35058,"Ġcondol":35059,"hunt":35060,"ositories":35061,"Ġaccuses":35062,"Filename":35063,"Ġshri":35064,"Ġmotivate":35065,"Ġreflections":35066,"Null":35067,"ĠLobby":35068,"¥µ":35069,"ĠSATA":35070,"ĠBackup":35071,"Ñĥ":35072,"nin":35073,"ĠCorrection":35074,"Ġjuicy":35075,"utra":35076,"ĠPric":35077,"Ġrestraining":35078,"ĠAirbnb":35079,"ĠArrest":35080,"Ġappropriations":35081,"Ġslopes":35082,"Ġmanslaughter":35083,"Ġworkings":35084,"ĠHuss":35085,"ĠFrey":35086,"Leave":35087,"ĠHarmony":35088,"ĠFeder":35089,"Ġ430":35090,"Ġtrench":35091,"Ġgladly":35092,"Ġbullpen":35093,"ĠGau":35094,"bones":35095,"Ġgroove":35096,"Ġpretext":35097,"ãħĭ":35098,"Ġtransmitter":35099,"ĠComponent":35100,"Ġunderage":35101,"ĠEmpires":35102,"Tile":35103,"Ġoy":35104,"ĠMarvin":35105,"ĠCAS":35106,"Ġbloss":35107,"Ġreplicated":35108,"ĠMariners":35109,"Marcus":35110,"ĠBlocks":35111,"Ġliberated":35112,"Ġbutterfly":35113,"Feel":35114,"Ġfermentation":35115,"Ġyoutube":35116,"Ġoffend":35117,"ĠTerm":35118,"resist":35119,"Ġcessation":35120,"Ġinsurgency":35121,"Ġbir":35122,"ĠRaise":35123,"595":35124,"Ġhypotheses":35125,"502":35126,"Ġplaque":35127,"ocrat":35128,"Ġjackets":35129,"ĠHuffPost":35130,"among":35131,"Ġconfer":35132,"487":35133,"ĠLilly":35134,"Ġadapting":35135,"ĠFay":35136,"Ġshoved":35137,"vec":35138,"Ġrefine":35139,"Ġgon":35140,"Ġgunmen":35141,"zai":35142,"ĠShuttle":35143,"ĠIzan":35144,"Ġ1913":35145,"Ġplethora":35146,"··":35147,"Ġ510":35148,"Ġpuberty":35149,"Ġ241":35150,"ĠWealth":35151,"ĠAlma":35152,"ĠMEM":35153,"ĠAdults":35154,"Cas":35155,"prison":35156,"Race":35157,"Ġwaterproof":35158,"Ġathleticism":35159,"Ġcapitalize":35160,"ĠJuice":35161,"Ġilluminated":35162,"ĠPascal":35163,"Ġirritation":35164,"ĠWitnesses":35165,"adle":35166,"ĠAstro":35167,"Ġfax":35168,"ĠElvis":35169,"Primary":35170,"ĠLich":35171,"ĠElves":35172,"Ġresiding":35173,"Ġstumble":35174,"319":35175,"ĠPKK":35176,"Ġadversaries":35177,"DOS":35178,"ĠRitual":35179,"Ġsmear":35180,"Ġarson":35181,"idental":35182,"Ġscant":35183,"Ġmonarchy":35184,"Ġhalftime":35185,"Ġresidue":35186,"Ġindign":35187,"ĠShaun":35188,"ĠElm":35189,"auri":35190,"Aff":35191,"WATCH":35192,"ĠLyon":35193,"helps":35194,"361":35195,"Ġlobbyist":35196,"Ġdiminishing":35197,"Ġoutbreaks":35198,"Ġgoats":35199,"favorite":35200,"ĠNah":35201,"sonian":35202,"ĠBooster":35203,"Ġsandbox":35204,"ĠFare":35205,"ĠMalta":35206,"ĠattRot":35207,"ĠMOR":35208,"lde":35209,"Ġnavigating":35210,"Touch":35211,"Ġuntrue":35212,"ĠDisaster":35213,"Ġludicrous":35214,"Password":35215,"ĠJFK":35216,"blogspot":35217,"416":35218,"ĠUNDER":35219,"ernal":35220,"Ġdelaying":35221,"TOP":35222,"Ġimplants":35223,"ĠAVG":35224,"ĠHuge":35225,"attr":35226,"Ġjournalistic":35227,"ĠPeyton":35228,"ĠIA":35229,"Rap":35230,"goal":35231,"ĠProgramme":35232,"Ġsmashing":35233,"wives":35234,"println":35235,"ĠPlague":35236,"inus":35237,"EEP":35238,"Ġcruiser":35239,"ĠParish":35240,"uminium":35241,"Ġoccupants":35242,"ĠJihad":35243,"mop":35244,"Ġpint":35245,"Ġhect":35246,"ĠMecca":35247,"director":35248,"ĠFunding":
35249,"ĠMixed":35250,"Ġstag":35251,"Tier":35252,"Ġgust":35253,"Ġbrightly":35254,"orsi":35255,"Ġuphill":35256,"RD":35257,"Ġlesions":35258,"ĠBundy":35259,"livious":35260,"Ġbiologist":35261,"ĠFaculty":35262,"ĠAuthorization":35263,"Ġ244":35264,"Allow":35265,"ï¸":35266,"ĠGiul":35267,"Ġpertinent":35268,"otaur":35269,"esse":35270,"ĠRoof":35271,"Ġunmanned":35272,"351":35273,"ĠShak":35274,"ĠOrient":35275,"Ġendanger":35276,"Dir":35277,"Ġreplen":35278,"edient":35279,"Ġtailor":35280,"Ġgadgets":35281,"Ġaudible":35282,"âĺĨ":35283,"Nice":35284,"Ġbombard":35285,"ĠRape":35286,"Ġdefiance":35287,"ĠTWO":35288,"ĠFilipino":35289,"Ġunaffected":35290,"ervatives":35291,"Ġsoared":35292,"ĠBolton":35293,"Ġcompromising":35294,"ĠBrewers":35295,"RAL":35296,"ĠAHL":35297,"icycle":35298,"Ġvampires":35299,"Ġdipped":35300,"oyer":35301,"ĠXIII":35302,"Ġsideways":35303,"ĠWaste":35304,"ĠDiss":35305,"ĠâĶľâĶĢâĶĢ":35306,"$.":35307,"Ġhabitats":35308,"ĠBeef":35309,"truth":35310,"trained":35311,"split":35312,"Rus":35313,"Andy":35314,"ĠBram":35315,"REP":35316,"pid":35317,"è£ħ":35318,"ĠMutant":35319,"Anim":35320,"ĠMarina":35321,"Ġfutile":35322,"highest":35323,"frequency":35324,"Ġepilepsy":35325,"Ġcoping":35326,"Ġconcise":35327,"Ġtracing":35328,"ĠSUN":35329,"panel":35330,"ĠSophie":35331,"ĠCrowley":35332,"ĠAdolf":35333,"ĠShooter":35334,"Ġshaky":35335,"ĠIG":35336,"ĠLies":35337,"ĠBarber":35338,"pkg":35339,"Ġuptake":35340,"Ġpredatory":35341,"ULTS":35342,"/**":35343,"Ġintoxicated":35344,"ĠWestbrook":35345,"odder":35346,"hement":35347,"Ġbaseman":35348,"APD":35349,"storage":35350,"ĠFifty":35351,"editor":35352,"GEN":35353,"UTION":35354,"irting":35355,"Ġsewing":35356,"rift":35357,"Ġagony":35358,"ĠSands":35359,"Ġ254":35360,"Cash":35361,"Ġlodge":35362,"Ġpunt":35363,"Natural":35364,"ĠIdeas":35365,"Ġerroneous":35366,"ĠSensor":35367,"ĠHannity":35368,"Ġ1921":35369,"Ġmould":35370,"ĠGon":35371,"kaya":35372,"Ġanonymously":35373,"ĠKEY":35374,"Ġsimulator":35375,"Winter":35376,"Ġstreamed":35377,"507":35378,"?\",":35379,"Ġteased":35380,"Ġcoefficient":35381,"Ġwartime":35382,"ĠTHR":35383,"''.":35384,"ĠBanking":35385,"mpire":35386,"Ġfandom":35387,"Ġlia":35388,"Ga":35389,"Ġdownhill":35390,"Ġinterpreting":35391,"Individual":35392,"Norm":35393,"Ġjealousy":35394,"bitcoin":35395,"Ġpleasures":35396,"ĠToys":35397,"ĠChevrolet":35398,"ĠAdvisor":35399,"IZE":35400,"Ġreceptions":35401,"706":35402,"Cro":35403,"Ġ262":35404,"Ġcitrus":35405,"iru":35406,"Reviewer":35407,"jected":35408,"UES":35409,"anz":35410,"1981":35411,"ĠWorker":35412,"Ġcomplied":35413,"orescent":35414,"continental":35415,"Ton":35416,"ĠPrism":35417,"ĠSheep":35418,"Ġ288":35419,"nox":35420,"ĠVog":35421,"Ord":35422,"Ġrealms":35423,"tek":35424,"Ġirrigation":35425,"Ġbicycles":35426,"Ġelectronically":35427,"poly":35428,"tall":35429,"());":35430,"Ġaesthetics":35431,"ĠIntegrated":35432,"Explore":35433,"Ġdunk":35434,"476":35435,"pain":35436,"ĠJacques":35437,"ĠDmit":35438,"Frames":35439,"Ġreunited":35440,"Ġhumid":35441,"Dro":35442,"Political":35443,"Ġyouthful":35444,"Ġentails":35445,"Ġmosquito":35446,"363":35447,"species":35448,"Ġcoordinating":35449,"ĠMayhem":35450,"ĠMagnus":35451,"Mount":35452,"Improved":35453,"ĠSTATE":35454,"ATTLE":35455,"Ġflowed":35456,"Ġtackled":35457,"Ġfashioned":35458,"Ġreorgan":35459,"ivari":35460,"finger":35461,"Ġreluctantly":35462,"etting":35463,"ĠVand":35464,"young":35465,"ĠGarland":35466,"Ġpresumption":35467,"Ġamenities":35468,"ĠPleasant":35469,"onential":35470,"ĠOxy":35471,"Ġmorals":35472,"ĠYah":35473,"Ready":35474,"Simon":35475,"Enh":35476,"Demon":35477,"Ġclich":35478,"Monitor":35479,"ĠDU":3
5480,"Ġwelcomes":35481,"Ġstandout":35482,"Ġdreadful":35483,"Ġbananas":35484,"Ġballoons":35485,"hooting":35486,"basic":35487,"Ġsuffix":35488,"Ġduly":35489,"cano":35490,"Chain":35491,"atos":35492,"Ġgeopolitical":35493,"Ġ(&":35494,"ĠGemini":35495,"ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ":35496,"Ġacquitted":35497,"Luck":35498,"protect":35499,"1024":35500,"Ġscarcity":35501,"Ġmindfulness":35502,"ecided":35503,"DN":35504,"prime":35505,"ĠPresidents":35506,"ĠVIDEO":35507,"Ġ(âĪĴ":35508,"addock":35509,"NOR":35510,"ĠPru":35511,"pun":35512,"ĠLOL":35513,"))))":35514,"ĠLiqu":35515,"ĠSAS":35516,"Ġstyling":35517,"Ġpunishments":35518,"Ġnumb":35519,"Ġascertain":35520,"ĠRockies":35521,"flu":35522,"Thumbnail":35523,"Ġperpetrated":35524,"ĠSemi":35525,"Ġdisarm":35526,"ĠOlder":35527,"ĠException":35528,"Ġexponentially":35529,"ĠCommunities":35530,"Ġabolish":35531,"ĠPartner":35532,"ptoms":35533,"Ġ777":35534,"ĠFoley":35535,"ĠCases":35536,"Ġgrease":35537,"ĠRebirth":35538,"Ground":35539,"Ġ;)":35540,"ĠDoctrine":35541,"ikini":35542,"Ye":35543,"ĠBlossom":35544,"Ġpersists":35545,"bill":35546,"Ġinfusion":35547,"Ġbuddies":35548,"911":35549,"ĠPatient":35550,"Ġdemos":35551,"Ġacquaintance":35552,"ĠPaw":35553,"atari":35554,"Ġxml":35555,"Ġfascination":35556,"ĠServe":35557,"ÏĤ":35558,"branded":35559,"Ġaz":35560,"Returns":35561,"Ġovershadow":35562,"Ġroam":35563,"Ġspeedy":35564,"numbered":35565,"helial":35566,"Ġdisciple":35567,"Ġassurances":35568,"given":35569,"pecting":35570,"ĠNatalie":35571,"çĶ°":35572,"Ġmosquitoes":35573,"rotein":35574,"Ġnumeric":35575,"Ġindependents":35576,"Ġtransitional":35577,"Ġreactionary":35578,"ĠMechdragon":35579,"doctor":35580,"Ġshortest":35581,"Ġsequential":35582,"ĠBac":35583,"ĠAccounts":35584,"ãģĮ":35585,"achy":35586,"ractive":35587,"ĠRegiment":35588,"Ġbreathtaking":35589,"fficiency":35590,"ĠBates":35591,"Ġ311":35592,"Ġwardrobe":35593,"fts":35594,"ĠBerk":35595,"Simply":35596,"ĠRiverside":35597,"ivering":35598,"idential":35599,"lucent":35600,"Ġenriched":35601,"ĠConver":35602,"ĠGiving":35603,"ãĥĻ":35604,"Ġlegalize":35605,"ĠFTC":35606,"Ġfreaking":35607,"Mix":35608,"Ġterrestrial":35609,"esian":35610,"cients":35611,"Wing":35612,"LOAD":35613,"Ġledge":35614,"ĠViolent":35615,"ĠMetall":35616,"Ġ308":35617,"Ġsoutheastern":35618,"hetto":35619,"Meat":35620,"Ġslowdown":35621,"Ġretreated":35622,"Jeremy":35623,"endas":35624,"*****":35625,"eric":35626,"Ġreins":35627,"oppable":35628,"ĠHumanity":35629,"earances":35630,"rigan":35631,"Camera":35632,"Ġwaivers":35633,"soc":35634,"Ġalteration":35635,"transform":35636,"ĠCemetery":35637,"506":35638,"Ġindefinite":35639,"Ġstimulating":35640,"yg":35641,"603":35642,"ĠSop":35643,"Ġdescriptive":35644,"Phase":35645,"ĠEdmund":35646,"Ġpneumonia":35647,"ventus":35648,"Amb":35649,"Ġlaboratories":35650,"ĠExclusive":35651,"ugar":35652,"Were":35653,"Ġmalfunction":35654,"Ġhomosexuals":35655,"Ġ-------":35656,"uni":35657,"Ġturbines":35658,"ĠEquity":35659,"Du":35660,"Ġminded":35661,"ĠRH":35662,"ĠBlackhawks":35663,"Ġfeats":35664,"Ġ1700":35665,"repl":35666,"362":35667,"laden":35668,"Ġindispensable":35669,"lyss":35670,"tti":35671,"Ġreel":35672,"Ġdiverted":35673,"Ġlikeness":35674,"Ġsubscriptions":35675,"Ġfingert":35676,"Ġfilthy":35677,"destruct":35678,"draft":35679,"ĠBernardino":35680,"launch":35681,"Ġperplex":35682,"ĠSUM":35683,"carb":35684,"Ġsweater":35685,"ĠVenture":35686,"ĠJag":35687,"ĠCeleb":35688,"ĠVoters":35689,"Ġsteadfast":35690,"Ġathletics":35691,"ĠHanson":35692,"ĠDrac":35693,"Tracker":35694,"Ġcommend":356
95,"ĠPresidency":35696,"ĠDID":35697,"informed":35698,"Ġwebpage":35699,"Pretty":35700,"Ġforcefully":35701,"ãĥĥãĤ¯":35702,"Ġrelocation":35703,"Ġsatire":35704,"âī":35705,"ĠSunderland":35706,"æĦ":35707,"Voice":35708,"????????":35709,"Ġinformant":35710,"Ġbowel":35711,"ĠUniform":35712,"Ġ...\"":35713,"Ġpurge":35714,"Ġpicnic":35715,"ĠUmb":35716,"ĠUPDATE":35717,"ĠSapphire":35718,"ĠStall":35719,"learn":35720,"Ġobjectively":35721,"Ġobliter":35722,"Ġloophole":35723,"Ġjourneys":35724,"Ġomission":35725,"Pros":35726,"ĠSidney":35727,"ploma":35728,"Ġsprayed":35729,"Ġguru":35730,"Ġtraitor":35731,"Ġtimet":35732,"Ġsnapping":35733,"ĠSevent":35734,"urnal":35735,"ĠUkip":35736,"Ġbowed":35737,"poral":35738,"liberal":35739,"Ros":35740,"Questions":35741,"iOS":35742,"Ġsummarize":35743,"STAT":35744,"Ġ1850":35745,"apest":35746,"Ġlender":35747,"ĠVariable":35748,"bringing":35749,"ĠLORD":35750,",)":35751,"Ġcollapses":35752,"xiety":35753,"ĠNed":35754,"YD":35755,"ĠScha":35756,"Ġantibody":35757,"Ġdisband":35758,"yre":35759,"illusion":35760,"Ġrover":35761,"shed":35762,"ĠHirosh":35763,"cci":35764,"Ġcalam":35765,"ĠMorton":35766,"Pinterest":35767,"Ġ1928":35768,"ĠEuras":35769,"ordes":35770,"Ġfences":35771,"ĠInventory":35772,"ĠValencia":35773,"ĠUd":35774,"ĠTiff":35775,"Ġsque":35776,"Ġquotation":35777,"Ġtroublesome":35778,"erker":35779,"QUEST":35780,"ĠKingdoms":35781,"south":35782,"Ġlevy":35783,"Prince":35784,"ĠSting":35785,"Ġnicknamed":35786,"Ġappe":35787,"Ġphotographic":35788,"Ġcorpus":35789,"reference":35790,"ĠTrog":35791,"Unt":35792,")=(":35793,"ĠLatvia":35794,"Ġactivating":35795,"Ġlicensee":35796,"Ġdisparities":35797,"ĠNewsletter":35798,"ãĥĥãĥĪ":35799,"Ġfreeing":35800,"ĠJeep":35801,"ĠPerception":35802,"insk":35803,"Ġsilicone":35804,"ĠHayden":35805,"Lean":35806,"ĠSuzuki":35807,"ibrarian":35808,"668":35809,"Ġspor":35810,"Ġcorrelations":35811,"aghetti":35812,"Ġtuber":35813,"ĠIPCC":35814,"ilus":35815,"ĠVu":35816,"Ġwealthiest":35817,"ĠCarbuncle":35818,"anza":35819,"Ġfooled":35820,"ĠZur":35821,"Ġdaddy":35822,"rano":35823,"ilian":35824,"Ġknockout":35825,"fman":35826,"required":35827,"ĠWikileaks":35828,"ĠDuffy":35829,"ONT":35830,"Ġinsol":35831,"ĠObjects":35832,"Ġbou":35833,"ĠNordic":35834,"ĠInsert":35835,"scan":35836,"Ġdancers":35837,"Ġidiots":35838,"majority":35839,"ĠNeville":35840,"ĠFreeBSD":35841,"Ġtart":35842,"panic":35843,"690":35844,"Ġcocoa":35845,"Ġsampled":35846,"Ġlookup":35847,"Indust":35848,"Ġinjections":35849,"genre":35850,"Ġau":35851,"Ġroadway":35852,"Ġgenitals":35853,"Kind":35854,"ĠExaminer":35855,"ĠYaz":35856,"Fresh":35857,"Ġparalysis":35858,"ĠAluminum":35859,"Ġreap":35860,"oké":35861,"Ġsloppy":35862,"ĠTunnel":35863,"posium":35864,"nery":35865,"enic":35866,"Ġherbal":35867,"ĠOuter":35868,"ĠBuilder":35869,"Ġincur":35870,"Ġideologies":35871,"Ġbackups":35872,"consuming":35873,"ĠDetect":35874,"deck":35875,"ĠKNOW":35876,"ĠGret":35877,"ĠMIC":35878,"Ġtoughness":35879,"ĠExhibit":35880,"Ġhive":35881,"Les":35882,"ĠSCHOOL":35883,"ĠAtari":35884,"alde":35885,"ĠNull":35886,"andestine":35887,"mouse":35888,"Ġbrigade":35889,"489":35890,"Ġrevol":35891,"ĠLawson":35892,"ĠWah":35893,"opoly":35894,"ebted":35895,"ĠSaunders":35896,"Ġ313":35897,"ĠWinc":35898,"Ġtaboo":35899,"ĠHelmet":35900,"Ġwedge":35901,"chip":35902,"ĠTina":35903,"bg":35904,"Ġinfuri":35905,"rn":35906,"Ġanomalies":35907,"ĠSync":35908,"ĠExam":35909,"ĠCommit":35910,"ĠDiary":35911,"ĠALSO":35912,"ĠDebor":35913,"omedical":35914,"Ġcomprehension":35915,"655":35916,"Ġempowering":35917,"Ġire":35918,"Ġjuices":35919,"ĠETH":35920,"ĠBoxing":35921,"=\"/":35922,"Ġfacilitated":35923,"poke":35924,"
ĠParsons":35925,"ĠModer":35926,"travel":35927,"Ġcivilizations":35928,"Ġlibertarians":35929,"Ġrune":35930,"ĠClarks":35931,"athed":35932,"Ġcampaigners":35933,"ĠDispatch":35934,"ĠFahrenheit":35935,"ĠCapcom":35936,"----------":35937,"Ġlace":35938,"Ġdraining":35939,"Ġliner":35940,"ĠArtificial":35941,"én":35942,"task":35943,"]).":35944,"ĠGMO":35945,"ĠOperator":35946,"ordinary":35947,"ĠInfluence":35948,"ĠUps":35949,"Ġpotency":35950,"ussen":35951,"ospons":35952,"ĠSwim":35953,"ĠDeadline":35954,"Unity":35955,"Ġculinary":35956,"Ġenlightenment":35957,"Ġwearer":35958,"Ġmined":35959,"Ġply":35960,"Ġincest":35961,"ĠDVDs":35962,"Walk":35963,"BTC":35964,"Trade":35965,"Ġdeval":35966,"iband":35967,"ĠOversight":35968,"Palestinian":35969,"Ġdart":35970,"Ġmul":35971,"LR":35972,"Ġremovable":35973,"ĠRealms":35974,"ìĿ":35975,"Ġmiscar":35976,"ĠVulkan":35977,"685":35978,"ère":35979,"ĠSap":35980,"Ġmerging":35981,"ĠCarly":35982,"chester":35983,"Ġbrisk":35984,"Ġluxurious":35985,"ĠGenerator":35986,"Ġbitterness":35987,"Ġedible":35988,"Ġ243":35989,"TG":35990,"Ġrectangle":35991,"WithNo":35992,"below":35993,"Jenn":35994,"Ġdarkest":35995,"Ġhitch":35996,"Ġdosage":35997,"Ġscaven":35998,"ĠKeller":35999,"ĠIllustrated":36000,"Certainly":36001,"ĠMavericks":36002,"Marginal":36003,"Ġdiarrhea":36004,"Ġenormously":36005,"Ġ999":36006,"shr":36007,"quart":36008,"Ġadamant":36009,"ĠMew":36010,"Ġrenovation":36011,"Ġcervical":36012,"ĠPercentage":36013,"eners":36014,"ĠKimber":36015,"Ġfloats":36016,"Ġdex":36017,"ĠWitcher":36018,"ĠSwansea":36019,"dm":36020,"Ġsalty":36021,"yellow":36022,"Ġcape":36023,"ĠDrain":36024,"ĠPaula":36025,"ĠToledo":36026,"lesi":36027,"Magazine":36028,"ĠWick":36029,"ĠMn":36030,"ĠAck":36031,"ĠRiding":36032,"ASON":36033,"Ġhomophobic":36034,"ARP":36035,"Ġwandered":36036,"CPU":36037,"oodoo":36038,"ĠPipe":36039,"Ġtightening":36040,"ĠButt":36041,"318":36042,"Ġdeserted":36043,"Session":36044,"Ġfacilitating":36045,"Jump":36046,"Ġemergencies":36047,"OWER":36048,"Ġexhaustive":36049,"ĠAFTER":36050,"Ġheartbeat":36051,"ĠLabel":36052,"acky":36053,"ĠCertified":36054,"iltration":36055,"Ze":36056,"ĠUtt":36057,"Ġ1300":36058,"Ġpresume":36059,"ĠDisp":36060,"Ġsurged":36061,"Ġdolls":36062,"Columb":36063,"Ġchimpan":36064,"ĠRazor":36065,"Ġticks":36066,"Ġcouncillor":36067,"Ġpilgrimage":36068,"ĠRebels":36069,"ĠQC":36070,"ĠAuction":36071,"xia":36072,"ikk":36073,"bred":36074,"Ġinsertion":36075,"Ġcoarse":36076,"dB":36077,"SEE":36078,"ĠZap":36079,"ĠFoo":36080,"Ġcontempor":36081,"ĠQuarterly":36082,"otions":36083,"ĠAlchemist":36084,"ĠTrey":36085,"ĠDuo":36086,"Sweet":36087,"804":36088,"ĠGiov":36089,"Ġfunn":36090,"Nin":36091,"hoff":36092,"Ġramifications":36093,"Ġ1922":36094,"ĠExperts":36095,"azes":36096,"Ġgarments":36097,"arial":36098,"ĠNab":36099,"Ġ257":36100,"ĠVed":36101,"Ġhumorous":36102,"ĠPompe":36103,"Ġnylon":36104,"Ġlurking":36105,"ĠSergey":36106,"ĠMattis":36107,"Ġmisogyny":36108,"ĠComponents":36109,"ĠWatching":36110,"ĠFolk":36111,"ractical":36112,"Bush":36113,"Ġtaped":36114,"Ġgrouping":36115,"Ġbeads":36116,"Ġ2048":36117,"Ġcondu":36118,"querque":36119,"Reading":36120,"Ġgrievances":36121,"Ultra":36122,"Ġendpoint":36123,"Hig":36124,"ĠStatic":36125,"ĠScarborough":36126,"Lua":36127,"ĠMessi":36128,"aqu":36129,"ĠPsyNet":36130,"ĠRudd":36131,"Ġavenue":36132,"vp":36133,"Jer":36134,"Ġshady":36135,"ĠResist":36136,"ĠArtemis":36137,"Ġcareless":36138,"Ġbrokers":36139,"Ġtemperament":36140,"Ġ520":36141,"Tags":36142,"ĠTurning":36143,"Ġuttered":36144,"Ġpedd":36145,"Ġimprovised":36146,"Ġ:(":36147,"Ġtabl":36148,"Ġplains":36149,"1600":36150,"pressure":36151,"ĠEssence":3615
2,"margin":36153,"friends":36154,"ĠRestoration":36155,"Ġpollut":36156,"ĠPoker":36157,"ĠAugustine":36158,"ĠCIS":36159,"ĠSEAL":36160,"orama":36161,"Ġthwart":36162,"seek":36163,"Ġpagan":36164,"º":36165,"cpu":36166,"Ġgarn":36167,"Ġassortment":36168,"ĠILCS":36169,"tower":36170,"Recommended":36171,"Ġunborn":36172,"ĠRandomRedditor":36173,"ĠRandomRedditorWithNo":36174,"Ġparalyzed":36175,"Ġeruption":36176,"Ġintersect":36177,"ĠStoke":36178,"ĠSco":36179,"Bind":36180,"å¾":36181,"ĠPNG":36182,"ĠNegative":36183,"ĠNOAA":36184,"Leon":36185,"Ġalloy":36186,"ĠLama":36187,"ĠDiversity":36188,"575":36189,"Ġunderestimated":36190,"ĠScor":36191,"Ġmural":36192,"Ġbusted":36193,"soon":36194,"lif":36195,"Ġnonex":36196,"Ġallergy":36197,"ĠUnderworld":36198,"ĠRays":36199,"ĠBlasio":36200,"Ġhrs":36201,"ĠDir":36202,"Ġ327":36203,"byter":36204,"Ġreplacements":36205,"Ġactivates":36206,"rived":36207,"MH":36208,"Ġpans":36209,"ĠHI":36210,"Ġlongitudinal":36211,"Ġnuisance":36212,"aler":36213,"Ġswell":36214,"ĠSigned":36215,"sci":36216,"ĠIsles":36217,"ĠAGA":36218,"Ġdefiant":36219,"Ġsonic":36220,"ocon":36221,"KC":36222,"ĠAim":36223,"tie":36224,"ahah":36225,"ĠmL":36226,"DX":36227,"Ġbisc":36228,"ĠBillboard":36229,"ĠSYSTEM":36230,"NEY":36231,"gaard":36232,"Ġdistressed":36233,"formerly":36234,"Alan":36235,"Ġchefs":36236,"Ġoptics":36237,"ĠComet":36238,"ĠAMC":36239,"Ġredesigned":36240,"irmation":36241,"Ġsightings":36242,"382":36243,"311":36244,"ĠWB":36245,"Ġcontraction":36246,"ĠTOTAL":36247,"Dual":36248,"Ġstartled":36249,"Ġunderstandably":36250,"Ġsunglasses":36251,"ETHOD":36252,"Ġdocker":36253,"Ġsurfing":36254,"ĠHEL":36255,"ĠSlack":36256,"tones":36257,"Ġshalt":36258,"Visual":36259,"498":36260,"Department":36261,"cussion":36262,"Ġunrestricted":36263,"Ġtad":36264,"Ġrename":36265,"employed":36266,"Ġeducating":36267,"Ġgrinned":36268,"bedroom":36269,"ĠActivities":36270,"ĠVelvet":36271,"ĠSWAT":36272,"Ġshuffle":36273,"igor":36274,"Ġsaturation":36275,"Finding":36276,"cream":36277,"icter":36278,"Ġvodka":36279,"tracking":36280,"tec":36281,"Ġforeground":36282,"iesta":36283,"Ġvehement":36284,"ĠECB":36285,"ĠTie":36286,"Ey":36287,"Ġturtles":36288,"ĠRailroad":36289,"ĠKatz":36290,"ĠFrames":36291,"Ġmenace":36292,"ĠFellowship":36293,"ĠEssential":36294,"uggish":36295,"Ġdrip":36296,"chwitz":36297,"ĠKyoto":36298,"sb":36299,"ĠNina":36300,"Parameter":36301,"Ġalarms":36302,"ĠClaud":36303,"Ġpioneering":36304,"Ġchiefly":36305,"ĠScream":36306,"Collection":36307,"Ġthankfully":36308,"ĠRonaldo":36309,"åŃIJ":36310,"strip":36311,"ĠDisneyland":36312,"commercial":36313,"Seeing":36314,"Soul":36315,"Ġevacuate":36316,"Ġciv":36317,"ĠAshe":36318,"Ġdivides":36319,"ĠDagger":36320,"rehensive":36321,"Ġberries":36322,"ĠDF":36323,"Ġsushi":36324,"Ġplurality":36325,"WI":36326,"Ġdisadvantaged":36327,"Ġbattalion":36328,"obiles":36329,"451":36330,"Ġcling":36331,"Ġundeniable":36332,"ĠLounge":36333,"Ġhaunt":36334,"phe":36335,"Ġquantify":36336,"Ġdiffered":36337,"Ġ[*]":36338,"ĠViz":36339,"cum":36340,"slave":36341,"Ġvideog":36342,"Ġquar":36343,"Ġbundles":36344,"ĠAlonso":36345,"tackle":36346,"Ġneuronal":36347,"Ġlandslide":36348,"confirmed":36349,"ĠDepth":36350,"Ġrenewables":36351,"Bear":36352,"ĠMacedonia":36353,"Ġjerseys":36354,"Ġbunk":36355,"ĠSpawn":36356,"ĠControls":36357,"ĠBuchanan":36358,"Ġrobotics":36359,"Ġemphasizing":36360,"ĠTutorial":36361,"hyp":36362,"iston":36363,"Ġmonumental":36364,"æ°":36365,"ĠCarry":36366,"Ġtbsp":36367,"enance":36368,"Hill":36369,"arthed":36370,"Ġrotten":36371,"Dean":36372,"Ġtwisting":36373,"Ġgoodwill":36374,"Ġimmersion":36375,"Living":36376,"Ġbrushes":36377,"ĠCGI":36
378,"ĠAtk":36379,"traditional":36380,"Ġphantom":36381,"ĠStamina":36382,"Ġexpansions":36383,"ĠMarin":36384,"Ġembarked":36385,"ĠEg":36386,"intestinal":36387,"ĠPEOPLE":36388,"ĠBooth":36389,"ĠAppalach":36390,"Ġrelegated":36391,"VT":36392,"MIT":36393,"Ġmuster":36394,"Ġwithdrawing":36395,"Ġmicroscope":36396,"ĠGathering":36397,"ĠCrescent":36398,"ĠArgentine":36399,"ĠDecre":36400,"ĠDominic":36401,"Ġbuds":36402,"antage":36403,"ĠIon":36404,"Ġwidened":36405,"ONSORED":36406,"ĠGloves":36407,"iannopoulos":36408,"razen":36409,"feel":36410,"Ġrepayment":36411,"Ġhindsight":36412,"ĠREALLY":36413,"ĠPistol":36414,"ĠBrah":36415,"Ġwatts":36416,"Ġsurvives":36417,"Ġflurry":36418,"issy":36419,"Alert":36420,"ĠUruguay":36421,"Phoenix":36422,"Slow":36423,"ĠGrave":36424,"ĠFir":36425,"Ġmanageable":36426,"Ġtariff":36427,"ĠUDP":36428,"ĠPistons":36429,"ĠNigerian":36430,"Ġstrikeouts":36431,"Ġcosmetics":36432,"whelming":36433,"fab":36434,"cape":36435,"proxy":36436,"Ġrethink":36437,"Ġovercoming":36438,"simple":36439,"Ġwoo":36440,"Ġdistracting":36441,"ĠStanton":36442,"ĠTulsa":36443,"ĠDock":36444,"659":36445,"Ġdiscord":36446,"ĠEmacs":36447,"ĠVes":36448,"ĠROB":36449,"Ġreassuring":36450,"Ġconsortium":36451,"Muslims":36452,"321":36453,"Ġprompts":36454,"sei":36455,"ĠHitch":36456,"imposed":36457,"ĠFool":36458,"Ġindiscrim":36459,"wrong":36460,"buquerque":36461,"Davis":36462,"!]":36463,"Ġtimeless":36464,"ĠNEED":36465,"Ġpesticide":36466,"Ġrallying":36467,"ĠCalder":36468,"Ġå¤":36469,"Ġxp":36470,"ĠUnle":36471,"ĠExport":36472,"luaj":36473,"Buff":36474,")[":36937,"Ġsqor":36938,"Saudi":36939,"Ġistg":36940,"Ġindulge":36941,"proc":36942,"Ġdisgusted":36943,"Ġcompounded":36944,"Ġnem":36945,"Ġschooling":36946,"ĠCure":36947,"processing":36948,"Sol":36949,"Ġproverb":36950,"itized":36951,"ĠAlvarez":36952,"Ġscarf":36953,"Ġrectangular":36954,"reve":36955,"Ġhormonal":36956,"ĠStress":36957,"itizen":36958,"Ġ425":36959,"girls":36960,"ĠNoir":36961,"ĠRapp":36962,"Ġmarches":36963,"church":36964,"ĠUses":36965,"Ġ405":36966,"ĠBerm":36967,"Ġordinances":36968,"ĠJudgment":36969,"Charges":36970,"ĠZin":36971,"Ġdusty":36972,"Ġstrawberries":36973,"Ġperce":36974,"ĠThur":36975,"ĠDeborah":36976,"netflix":36977,"ĠLambert":36978,"Ġamused":36979,"ĠGuang":36980,"YOU":36981,"RGB":36982,"ĠCCTV":36983,"Ġfiat":36984,"rang":36985,"Ġfederation":36986,"ĠMant":36987,"ĠBust":36988,"ĠMare":36989,"respective":36990,"ĠMigration":36991,"ĠBIT":36992,"590":36993,"Ġpatriotism":36994,"Ġoutlining":36995,"region":36996,"ĠJosé":36997,"Ġblasting":36998,"ĠEzra":36999,"Bs":37000,"Ġundermines":37001,"ĠSmooth":37002,"Ġclashed":37003,"radio":37004,"Ġtransitioning":37005,"ĠBuccaneers":37006,"ĠOwl":37007,"Ġplugs":37008,"Ġhiatus":37009,"ĠPinball":37010,"Ġmig":37011,"ĠNutr":37012,"ĠWolfe":37013,"Ġintegers":37014,"Ġorbits":37015,"ĠEdwin":37016,"ĠDirectX":37017,"bite":37018,"Ġblazing":37019,"vr":37020,"Edge":37021,"ĠPID":37022,"exit":37023,"ĠComed":37024,"ĠPathfinder":37025,"ĠGuid":37026,"ĠSigns":37027,"ĠZer":37028,"ĠAgenda":37029,"Ġreimbursement":37030,"Mesh":37031,"iPhone":37032,"ĠMarcos":37033,"ĠSites":37034,"hate":37035,"enburg":37036,"Ġsockets":37037,"pend":37038,"Batman":37039,"vir":37040,"ĠSHOW":37041,"Ġprovisional":37042,"conn":37043,"ĠDeaths":37044,"ATIVE":37045,"Profile":37046,"sym":37047,"JA":37048,"Ġninja":37049,"installed":37050,"idates":37051,"ebra":37052,"ĠOmaha":37053,"Ġseizing":37054,"ĠBeasts":37055,"Ġsalts":37056,"Mission":37057,"Generally":37058,"ĠTrilogy":37059,"heon":37060,"legates":37061,"Ġdime":37062,"Ġfaire":37063,"parable":37064,"Graph":37065,"Ġtotaling":37066,"Ġdiagrams":37067,"ĠYa
nuk":37068,"plet":37069,"ĠMeh":37070,"Ġmythical":37071,"ĠStephens":37072,"autical":37073,"ochemistry":37074,"Ġkilograms":37075,"Ġelbows":37076,"ancock":37077,"ĠBCE":37078,"ĠPrague":37079,"Ġimprov":37080,"ĠDevin":37081,"Ġ\"\\":37082,"paralle":37083,"Ġsupremacists":37084,"ĠBillion":37085,"Ġregimen":37086,"innacle":37087,"Ġrequisite":37088,"angan":37089,"ĠBurlington":37090,"ainment":37091,"ĠObjective":37092,"omsky":37093,"GV":37094,"Ġunilateral":37095,"Ġtc":37096,"Ġhires":37097,"mental":37098,"Ġinvoluntary":37099,"Ġtranspl":37100,"ĠASCII":37101,"¨":37102,"Events":37103,"Ġdoubted":37104,"ĠKaplan":37105,"ĠCourage":37106,"igon":37107,"ĠManaging":37108,"ĠTart":37109,"Ġfalsehood":37110,"ĠViolet":37111,"Ġairs":37112,"Ġfertilizer":37113,"Britain":37114,"Ġaquatic":37115,"ouf":37116,"Words":37117,"ĠHartford":37118,"Ġevenings":37119,"ĠVengeance":37120,"quite":37121,"Gall":37122,"ĠPret":37123,"Ġpdf":37124,"ĠLM":37125,"ĠSochi":37126,"ĠIntercept":37127,"920":37128,"Ġprofitability":37129,"ĠIdle":37130,"ĠMacDonald":37131,"ĠEstablishment":37132,"umsy":37133,"Ġgatherings":37134,"ĠNaj":37135,"Charlie":37136,"Ġascent":37137,"ĠProtector":37138,"Ġalgebra":37139,"Ġbios":37140,"forums":37141,"ELS":37142,"Introduced":37143,"Ġ335":37144,"Ġastronomy":37145,"Contribut":37146,"ĠPolic":37147,"Platform":37148,"Ġcontainment":37149,"wrap":37150,"Ġcoronary":37151,"ĠJelly":37152,"manager":37153,"Ġheartbreaking":37154,"cair":37155,"ĠChero":37156,"cgi":37157,"Medical":37158,"ĠAccountability":37159,"!!\"":37160,"ophile":37161,"Ġpsychotic":37162,"ĠRestrict":37163,"Ġequitable":37164,"issues":37165,"Ġ1905":37166,"ĠNek":37167,"cised":37168,"ĠTracking":37169,"Ġozone":37170,"Ġcooker":37171,"rosis":37172,"Ġreopen":37173,"Ġinfinity":37174,"ĠPharmaceutical":37175,"ensional":37176,"Attempt":37177,"ĠRory":37178,"Marco":37179,"Ġawaits":37180,"HOW":37181,"treated":37182,"Ġbolst":37183,"Ġrevered":37184,"Ġpods":37185,"oppers":37186,"0010":37187,"Ġamplitude":37188,"rican":37189,"SPONSORED":37190,"Ġtrousers":37191,"Ġhalves":37192,"ĠKaine":37193,"ĠCutler":37194,"ĠAUTH":37195,"Ġsplendid":37196,"Ġpreventive":37197,"ĠDudley":37198,"ifacts":37199,"uminati":37200,"ĠYin":37201,"Ġadmon":37202,"ĠVag":37203,"Ġinverted":37204,"Ġhastily":37205,"ĠHague":37206,"Lyn":37207,"Ġledger":37208,"Ġastronomical":37209,"getting":37210,"Ġcirca":37211,"ĠCic":37212,"ĠTennis":37213,"Limited":37214,"Ġdru":37215,"ĠBYU":37216,"Ġtravellers":37217,"Ġpane":37218,"ĠIntro":37219,"Ġpatiently":37220,"Ġaiding":37221,"Ġloos":37222,"ĠTough":37223,"Ġ293":37224,"Ġconsumes":37225,"SourceFile":37226,"Ġ\"\"\"":37227,"Ġbonding":37228,"Ġtilted":37229,"Ġmenstrual":37230,"ĠCelestial":37231,"ULAR":37232,"Plugin":37233,"Ġrisking":37234,"Naz":37235,"ĠRiyadh":37236,"Ġaccredited":37237,"Ġskirm":37238,"éĽ":37239,"Ġexaminer":37240,"Ġmessing":37241,"Ġnearing":37242,"ĠChern":37243,"ĠBeckham":37244,"Ġswapped":37245,"Ġgoose":37246,"Kay":37247,"Ġlofty":37248,"ĠWallet":37249,"Ġ['":37250,"Ġapocalypse":37251,"Ġbamboo":37252,"ĠSPACE":37253,"ĠElena":37254,"Ġ306":37255,"acons":37256,"Ġtightened":37257,"Ġadolescence":37258,"Ġrainy":37259,"Ġvandalism":37260,"ĠNewtown":37261,"Ġconject":37262,"cakes":37263,"Ġcheated":37264,"Ġmoderators":37265,"params":37266,"EFF":37267,"Ġdeceit":37268,"ĠSTL":37269,"ĠTanzania":37270,"ĠRI":37271,"Ġ1923":37272,"ĠExile":37273,"thel":37274,"Ġtheolog":37275,"Ġquirky":37276,"ĠIrvine":37277,"Ġneedy":37278,"oris":37279,"Um":37280,"Ka":37281,"Ġmailbox":37282,"322":37283,"Ġbos":37284,"ĠPetra":37285,"KING":37286,"Ġenlarged":37287,"Often":37288,"Ġbadass":37289,"Ġ343":37290,"ĠPlaces":37291,"ĠCAD":
37292,"Ġpristine":37293,"Ġintervening":37294,"direction":37295,"Ġlaz":37296,"ĠDSM":37297,"Ġprojecting":37298,"ĠFunk":37299,"agog":37300,"payment":37301,"nov":37302,"Ġchatter":37303,"ARB":37304,"Ġexaminations":37305,"ĠHousehold":37306,"ĠGus":37307,"Ford":37308,"414":37309,"Boss":37310,"Ġmystic":37311,"Ġleaps":37312,"ĠBav":37313,"ulz":37314,"budget":37315,"Football":37316,"Ġsubsidized":37317,"Ġfirsthand":37318,"Ġcoincide":37319,"ocular":37320,"Conn":37321,"ĠCollabor":37322,"Ġfools":37323,"amura":37324,"ahar":37325,"rists":37326,"Ġswollen":37327,"Ġexpended":37328,"ĠPau":37329,"sup":37330,"Ġspar":37331,"Ġkeynote":37332,"suff":37333,"Ġunequal":37334,"Ġprogressing":37335,"strings":37336,"ĠGamergate":37337,"Disney":37338,"ĠEleven":37339,"omnia":37340,"Ġscripted":37341,"Ġearners":37342,"brother":37343,"ĠEnabled":37344,"æ³":37345,"Ġlarvae":37346,"ĠLOC":37347,"mess":37348,"Wilson":37349,"ĠTemplate":37350,"successfully":37351,"Ġparamount":37352,"Ġcamouflage":37353,"Ġbinds":37354,"ĠQuiet":37355,"ĠShutterstock":37356,"rush":37357,"Ġmascot":37358,"fortune":37359,"ĠColt":37360,"ĠBeyon":37361,"habi":37362,"Ġhairc":37363,"Ġ267":37364,"ĠDeus":37365,"Ġtwitch":37366,"Ġconcentrating":37367,"Ġnipples":37368,"cible":37369,"Ġgir":37370,"NZ":37371,"Math":37372,"nih":37373,"Required":37374,"Ġponder":37375,"ĠSAN":37376,"Ġweddings":37377,"Ġloneliness":37378,"NES":37379,"ĠMahjong":37380,"695":37381,"addle":37382,"ĠGarner":37383,"ĠCOUR":37384,"Bridge":37385,"Ġspree":37386,"ĠCaldwell":37387,"Ġbribery":37388,"Ġ��������":37389,"plugins":37390,"Ġracket":37391,"Ġchampagne":37392,"versible":37393,"Vote":37394,"Ġmodifiers":37395,"Mayor":37396,"680":37397,"Ġassemblies":37398,"ĠSultan":37399,"ĠNing":37400,"ĠLadies":37401,"Ġsulfur":37402,"Ġorbs":37403,"Ġ-----":37404,"_______":37405,"ĠJournalism":37406,"Ġesports":37407,"Ġlush":37408,"Ġhue":37409,"Ġspectral":37410,"Honest":37411,"ãĥı":37412,"Ġbushes":37413,"Ġreinforcement":37414,"Ġreopened":37415,"ĠWheels":37416,"ĠMorg":37417,"rieving":37418,"Ġauxiliary":37419,"ĠjQuery":37420,"ĠBAT":37421,"tesque":37422,"Ġvertex":37423,"pure":37424,"frey":37425,"ãĤº":37426,"dos":37427,"Ġtyph":37428,"Ġcull":37429,"Ġeq":37430,"Ġdecon":37431,"Ġtossing":37432,"Ġdisparate":37433,"ĠBrigham":37434,"printf":37435,"ledged":37436,"Ġsund":37437,"Ġcozy":37438,"Ġhepatitis":37439,"performing":37440,"Ġaval":37441,"ĠGG":37442,"future":37443,"Ġpetertodd":37444,"ĠKosovo":37445,"Ġmagnets":37446,"Already":37447,"ĠEdison":37448,"ĠCeres":37449,"ĠRAID":37450,"Ġbrilliance":37451,"576":37452,"Ġderives":37453,"Ġhypertension":37454,"ĠÎĶ":37455,"Ġlambda":37456,"Ġflair":37457,"Ġmissionaries":37458,"Ġrapes":37459,"ĠStarter":37460,"ĠMonths":37461,"Ġdefy":37462,"Ġseismic":37463,"ĠRaphael":37464,"Ġeurozone":37465,"656":37466,"zsche":37467,"Ġscratched":37468,"Ġbows":37469,"ĠLennon":37470,"ĠGaia":37471,"Ġdripping":37472,"facts":37473,"Ale":37474,"Ġfrogs":37475,"ĠBreast":37476,"ogeneity":37477,"ĠProsecutor":37478,"Ġamplified":37479,"ĠHodg":37480,"ĠFn":37481,"Thousands":37482,"ĠNIH":37483,"ĠMonitoring":37484,"FTWARE":37485,"ĠPriebus":37486,"ĠGrowing":37487,"hunter":37488,"Ġdiagnose":37489,"ĠMald":37490,"ĠLR":37491,"Ġcrowned":37492,"Ġbursting":37493,"Ġdissolution":37494,"javascript":37495,"Ġusefulness":37496,"ĠExecution":37497,":(":37498,"ĠIvory":37499,"aah":37500,"Ġpersecuted":37501,"violence":37502,"istas":37503,"ĠCrate":37504,"Ġimpulses":37505,"ĠSpani":37506,"edes":37507,"Handle":37508,"ĠZerg":37509,"thinkable":37510,"Lastly":37511,"Ġspontaneously":37512,"Ġinconvenient":37513,"Ġdismissing":37514,"Ġplotted":37515,"Ġeighty":37516,"Ġ73
7":37517,"rish":37518,"ĠThornton":37519,"atham":37520,"Ġsitcom":37521,"Ven":37522,"Recipe":37523,"tel":37524,"lund":37525,"Ġclears":37526,"ĠSasuke":37527,"Ġ258":37528,"Ġopting":37529,"Ġenraged":37530,"esthetic":37531,"ĠAe":37532,"uchs":37533,"Prep":37534,"Flow":37535,"Ġrunoff":37536,"ĠEating":37537,"ĠGiles":37538,"ĠActing":37539,"resources":37540,"ibaba":37541,"Ġrpm":37542,"Ġskewed":37543,"ĠBlanc":37544,"ĠSakuya":37545,"Ġhotter":37546,"Ġ1924":37547,"opian":37548,"cko":37549,"Ġcrumbling":37550,"Ġcaptains":37551,"ĠAppropriations":37552,"leaders":37553,"dropping":37554,"anuts":37555,"Ġreversing":37556,"ĠPose":37557,"ĠSek":37558,"Scot":37559,"ĠIdea":37560,"cise":37561,"ĠSlovenia":37562,"Ġ317":37563,"Doctor":37564,"Ġcrocod":37565,"aldi":37566,"Sea":37567,"ĠFarrell":37568,"Ġmercenaries":37569,"ĠRNC":37570,"ĠGuess":37571,"Ġpacing":37572,"Machine":37573,"StreamerBot":37574,"ĠCharity":37575,"Ġ298":37576,"Ġcannons":37577,"ĠToby":37578,"TPPStreamerBot":37579,"ĠPassion":37580,"cfg":37581,"Thom":37582,"Ġbadges":37583,"ĠBernstein":37584,".âĢĵ":37585,"ĠPOP":37586,"ĠConj":37587,"Ġinitialization":37588,"Ġbiodiversity":37589,"Dub":37590,"Ġfeudal":37591,"Ġdisclaimer":37592,"Ġcrow":37593,"Ġignition":37594,"arf":37595,"SHA":37596,"ĠkHz":37597,"hazard":37598,"ĠArtists":37599,"oeuv":37600,"679":37601,"ĠRudy":37602,"Nine":37603,"ĠRamadan":37604,"å½":37605,"itto":37606,"Ġadrenaline":37607,"Cert":37608,"Ġsmelled":37609,"Ġimpunity":37610,"Ġagendas":37611,"ĠReborn":37612,"ĠConcent":37613,"ĠSeems":37614,"Ġomega":37615,"ĠDustin":37616,"Ġbacker":37617,"ĠSauce":37618,"ĠBoyle":37619,"WIN":37620,"Ġspins":37621,"Ġpauses":37622,"upt":37623,"Ġshredded":37624,"Ġstrapped":37625,"ĠCorruption":37626,"Ġscratches":37627,"Ġni":37628,"Ġattire":37629,"ĠSAF":37630,"FactoryReloaded":37631,"ĠIPS":37632,"Ġ(%":37633,"Ġseminar":37634,"focus":37635,"civil":37636,"Ġ1860":37637,"intosh":37638,"Ġcontinual":37639,"Ġabbrevi":37640,"ĠSok":37641,"ocobo":37642,"XM":37643,"Ġfrantic":37644,"Ġunavoidable":37645,"Ġartery":37646,"Ġannotations":37647,"bath":37648,"Climate":37649,"Ġdors":37650,"ĠSlide":37651,"coord":37652,"ĠReload":37653,"ĠLDL":37654,"ĠLovecraft":37655,"Ġunimagin":37656,"Ġresembled":37657,"Ġbarracks":37658,"np":37659,"Ġsurrogate":37660,"Ġcategorized":37661,"ãĤ©":37662,"Ġvaccinated":37663,"Ġdrainage":37664,"Ġindist":37665,"ĠWhatsApp":37666,"Ġ1870":37667,"olerance":37668,"invoke":37669,"amorph":37670,"Ġreconnect":37671,"Ġemanc":37672,"Ġblindness":37673,"Ġ1280":37674,"internet":37675,"collar":37676,"Ġaltru":37677,"Ġabyss":37678,"ĠTRI":37679,"657":37680,"Ġinfused":37681,"HEAD":37682,"Ġforestry":37683,"ĠWoody":37684,"ĠCi":37685,"wi":37686,"sam":37687,"784":37688,"holiday":37689,"Ġmogul":37690,"ĠFees":37691,"ĠDEN":37692,"Internal":37693,"urbed":37694,"fusc":37695,"atom":37696,"ĠIllusion":37697,"Ġpolled":37698,"Ġflap":37699,"Ġcoax":37700,"LGBT":37701,"Analy":37702,"ĠSections":37703,"ĠCaliforn":37704,"emn":37705,"Ġhither":37706,"ĠNIGHT":37707,"Ġnailed":37708,"ĠPipeline":37709,"391":37710,"oof":37711,"ĠPrimal":37712,"verend":37713,"Ġslashing":37714,"Ġretri":37715,"aviour":37716,"Ġdeparting":37717,"gil":37718,"ISC":37719,"Ġmidway":37720,"Ġultrasound":37721,"Ġbehaving":37722,"ĠTara":37723,"classes":37724,"Virtual":37725,"ĠColonial":37726,"Ġstripping":37727,"Ġorchestrated":37728,"ĠGraves":37729,"452":37730,"ĠIronically":37731,"ĠWriters":37732,"Ġlends":37733,"ĠManz":37734,"Ġraven":37735,"Ġoxidative":37736,"Ġ266":37737,"ELF":37738,"actually":37739,"ascar":37740,"Draft":37741,"Ġfavourable":37742,"Ġhumiliating":37743,"Ġfidelity":37744,"ĠHof":37745,"ĠXuan":
37746,"496":37747,"Ġlayered":37748,"atis":37749,"790":37750,"Ġpaycheck":37751,"iton":37752,"Kar":37753,"ĠVMware":37754,"ĠFarmer":37755,"Ġservic":37756,"glomer":37757,"Ġslump":37758,"ĠFabric":37759,"ĠDOC":37760,"esting":37761,"Ġreassure":37762,"Ġphyl":37763,"volt":37764,"itory":37765,"Rules":37766,"Ġoxidation":37767,"Ġprized":37768,"Ġmistress":37769,"ĠDjango":37770,"WARN":37771,"åij":37772,"Ġencode":37773,"ĠFeedback":37774,"Ġstupidity":37775,"Ian":37776,"ĠYugoslavia":37777,"ר":37778,"acl":37779,"UTE":37780,"1977":37781,"Ġqualifies":37782,"Ġpulses":37783,"pretty":37784,"Ġfroze":37785,"Ġss":37786,"Iterator":37787,"Ġurgently":37788,"Ġmailed":37789,"ĠCham":37790,"Ġsustaining":37791,"Ġbasil":37792,"Ġpuppies":37793,"ilant":37794,"ĠPLEASE":37795,"lap":37796,"aceous":37797,"Fear":37798,"ĠMastery":37799,"automatic":37800,"ĠTAG":37801,"Ġantim":37802,"agles":37803,"473":37804,"frames":37805,"Ġwhispers":37806,"ĠWhoever":37807,"Ġbravery":37808,"ĠUKIP":37809,"ractions":37810,"\"\"\"":37811,"Ġtame":37812,"Ġparted":37813,"everything":37814,"CONT":37815,"Ġindebted":37816,"Ġaddr":37817,"rek":37818,"IRED":37819,"Ġeminent":37820,"clinton":37821,"Ġousted":37822,"Ġreviewer":37823,"Ġmeltdown":37824,"Ġrearr":37825,"ĠYao":37826,"thereal":37827,"abyte":37828,"Ġstumbling":37829,"Ġbatches":37830,"Ġ259":37831,"Ġcontraceptive":37832,"Ġprostitute":37833,"ensis":37834,"Decl":37835,"ĠStrikes":37836,"Military":37837,"ĠOath":37838,"vacc":37839,"ppings":37840,"052":37841,"ĠpartName":37842,"amping":37843,"Reports":37844,"KI":37845,"CHR":37846,"Ġsubtly":37847,"swers":37848,"Blake":37849,"usual":37850,"Ġcontestants":37851,"Ġcartridges":37852,"ĠGREAT":37853,"Ġblush":37854,"ĠâĢº":37855,"472":37856,"Ġreasoned":37857,"ãĥ¤":37858,"paralleled":37859,"Ġdyn":37860,"agate":37861,"Ġnightly":37862,"åĨ":37863,"556":37864,"Ġsemantic":37865,"ĠAdvoc":37866,"Ġ!!":37867,"Ġdisagrees":37868,"ĠBW":37869,"Veh":37870,"Ġharming":37871,"Ġembraces":37872,"Ġstrives":37873,"Ġinland":37874,"ĠKard":37875,"Ġheats":37876,"ĠGinny":37877,"utan":37878,"ernaut":37879,"ylene":37880,"ĠElev":37881,"JD":37882,"Ġhars":37883,"ĠStarr":37884,"Ġskysc":37885,"Ġcollaborators":37886,"Usually":37887,"Ġrevolutions":37888,"ĠSTATS":37889,"Ġdismantle":37890,"Ġconfidently":37891,"Ġkinetic":37892,"Ali":37893,"Ġpercentile":37894,"Ġextracting":37895,"illian":37896,"estead":37897,"Ġphysicists":37898,"ĠMarshal":37899,"Ġfellowship":37900,"Ġdashed":37901,"ĠUR":37902,"ĠSioux":37903,"ĠCompact":37904,"amide":37905,"Python":37906,"ĠLeigh":37907,"ĠPharmac":37908,"istrates":37909,"herical":37910,"Ġfue":37911,"ĠEmin":37912,"Ġ({":37913,"ĠNeighborhood":37914,"Ġdisrupting":37915,"ĠDup":37916,"Ġgland":37917,"ĠSev":37918,"ĠMarian":37919,"argon":37920,"ĠDund":37921,"Ġ":46904,"ĠPhilips":46905,"ĠKafka":46906,"Ġupheaval":46907,"Ġsentimental":46908,"Ġsax":46909,"ĠAkira":46910,"serial":46911,"Matrix":46912,"Ġelecting":46913,"Ġcommenter":46914,"ĠNebula":46915,"plets":46916,"ĠNadu":46917,"ĠAdren":46918,"Ġenshr":46919,"ĠRAND":46920,"financial":46921,"ĠClyde":46922,"utherford":46923,"Ġsignage":46924,"Ġdeline":46925,"Ġphosphate":46926,"roversial":46927,"fascist":46928,"ĠVall":46929,"ĠBethlehem":46930,"Ġfors":46931,"Ġenglish":46932,"Solid":46933,"Nature":46934,"Ġva":46935,"ĠGuests":46936,"Ġtantal":46937,"Ġautoimmune":46938,";;;;;;;;;;;;":46939,"ĠTotally":46940,"ĠOv":46941,"Ġdefences":46942,"ĠCoconut":46943,"Ġtranquil":46944,"Ġploy":46945,"Ġflavours":46946,"ĠFlask":46947,"ãĤ¨ãĥ«":46948,"ĠWeston":46949,"ĠVolvo":46950,"870":46951,"Ġmicrophones":46952,"verbal":46953,"RPG":46954,"Ġiii":46955,";}":46956,"028":46957,"Ġ
headlined":46958,"Ġprimed":46959,"Ġhoard":46960,"ĠShad":46961,"ĠENTER":46962,"Ġtriangular":46963,"Ġcapit":46964,"lik":46965,"ĠAncients":46966,"Ġlash":46967,"Ġconvol":46968,"Ġcolonel":46969,"enemy":46970,"Gra":46971,"Ġpubs":46972,"utters":46973,"Ġassigns":46974,"ĠPenet":46975,"ĠMonstrous":46976,"ĠBowen":46977,"ilver":46978,"Haunted":46979,"ĠDing":46980,"started":46981,"plin":46982,"Ġcontaminants":46983,"ĠDOE":46984,"ffen":46985,"ĠTechnician":46986,"Ry":46987,"Ġrobbers":46988,"Ġhotline":46989,"ĠGuardiola":46990,"ĠKaufman":46991,"rower":46992,"ĠDresden":46993,"ĠAlpine":46994,"Elf":46995,"Ġfmt":46996,"ĠSard":46997,"urses":46998,"gpu":46999,"Unix":47000,"Ġunequivocally":47001,"ĠCitizenship":47002,"quad":47003,"mire":47004,"ĠSweeney":47005,"Battery":47006,"615":47007,"Ġpancakes":47008,"Ġoats":47009,"Maps":47010,"ĠContrast":47011,"mbudsman":47012,"ĠEPS":47013,"Ġsubcommittee":47014,"Ġsourcing":47015,"Ġsizing":47016,"ĠBuffer":47017,"ĠMandatory":47018,"Ġmoderates":47019,"ĠPatterns":47020,"ĠChocobo":47021,"ĠZan":47022,"ĠSTATES":47023,"ĠJudging":47024,"ĠInher":47025,"*:":47026,"Ġbil":47027,"ĠYen":47028,"Ġexhilar":47029,"ollower":47030,"zers":47031,"Ġsnug":47032,"maximum":47033,"Ġdespicable":47034,"ĠPACK":47035,"ĠAnnex":47036,"Ġsarcastic":47037,"Ġlatex":47038,"Ġtamp":47039,"ĠSao":47040,"bah":47041,"ĠReverend":47042,"ĠChinatown":47043,"ĠAUT":47044,"documented":47045,"ĠGABA":47046,"ĠCanaan":47047,"ĠÙħ":47048,"Ġgoverns":47049,"prev":47050,"Esc":47051,"ĠEstimates":47052,"OSP":47053,"Ġendeavour":47054,"ĠClosing":47055,"ometime":47056,"everyone":47057,"Ġworsen":47058,"Ġscanners":47059,"Ġdeviations":47060,"ĠRobotics":47061,"ĠCompton":47062,"Ġsorcerer":47063,"Ġendogenous":47064,"Ġemulation":47065,"ĠPiercing":47066,"ĠAph":47067,"ĠSocket":47068,"Ġbould":47069,"ĠOU":47070,"ĠBorderlands":47071,"Ġ1863":47072,"Gordon":47073,"ĠWTO":47074,"Ġrestricts":47075,"Ġmosaic":47076,"Ġmelodies":47077,"çĦ":47078,"Tar":47079,"Ġdisson":47080,"ĠProvides":47081,"Ġ......":47082,"bek":47083,"FIX":47084,"Ġbroom":47085,"anship":47086,"Doctors":47087,"Ġnerds":47088,"ĠRegions":47089,"naissance":47090,"Ġmete":47091,"Ġcrept":47092,"plings":47093,"Ġgirlfriends":47094,"knit":47095,"igent":47096,"owe":47097,"Ġushered":47098,"ĠBaz":47099,"Mobil":47100,"434":47101,"ĠPresents":47102,"origin":47103,"Ġinsomnia":47104,"ĠAux":47105,"439":47106,"ĠChili":47107,"irsch":47108,"GAME":47109,"Ġgestation":47110,"algia":47111,"romising":47112,"$,":47113,"crow":47114,"ĠInspection":47115,"atomic":47116,"Relations":47117,"JOHN":47118,"roman":47119,"ĠClockwork":47120,"ĠBakr":47121,"mone":47122,"MET":47123,"Ġthirsty":47124,"Ġbc":47125,"Ġfaculties":47126,"Rum":47127,"Ġnuance":47128,"ĠDarius":47129,"pleting":47130,"fters":47131,"etchup":47132,"Registration":47133,"ĠKE":47134,"Rah":47135,"Ġpreferential":47136,"ĠLash":47137,"ĠHH":47138,"Valid":47139,"ĠNAV":47140,"Ġstarve":47141,"ĠGong":47142,"zynski":47143,"ĠActress":47144,"Ġwik":47145,"Ġunaccompanied":47146,"lvl":47147,"Bride":47148,"ADS":47149,"ĠCommando":47150,"ĠVaughn":47151,"Wallet":47152,"Ġhopping":47153,"ĠVie":47154,"Ġcaveats":47155,"Ġalas":47156,"ifled":47157,"abuse":47158,"661":47159,"Ġibn":47160,"Ġgul":47161,"Ġrobbing":47162,"til":47163,"ILA":47164,"Ġmitigating":47165,"Ġaptly":47166,"Ġtyrant":47167,"Ġmidday":47168,"ĠGilmore":47169,"ĠDecker":47170,"Ġ§§":47171,"partial":47172,"Exactly":47173,"Ġphenotype":47174,"Ġ[+]":47175,"ĠPlex":47176,"ĠIps":47177,"versions":47178,"Ġebook":47179,"Ġchic":47180,"gross":47181,"\":\"\"},{\"":47182,"ĠSurprisingly":47183,"Morgan":47184,"Ġresidues":47185,"ĠConfederation":47186,"infe
ld":47187,"Ġlyr":47188,"moderate":47189,"Ġperpendicular":47190,"VK":47191,"Ġsynchronized":47192,"Ġrefreshed":47193,"Ġadore":47194,"ĠTorment":47195,"olina":47196,"Ġ2600":47197,"ItemTracker":47198,"Ġpies":47199,"ĠFAT":47200,"ĠRHP":47201,"048":47202,"ĠRESP":47203,"ĠBJ":47204,"allows":47205,"Pand":47206,"Ġunwelcome":47207,"ĠVoc":47208,"ĠBastard":47209,"ĠOW":47210,"ĠLAR":47211,"ĠHealer":47212,"Environmental":47213,"ĠKenyan":47214,"ĠTrance":47215,"ĠPats":47216,"Ġaliases":47217,"ĠGarfield":47218,"Ġcampaigner":47219,"Ġadvancements":47220,"ĠOkinawa":47221,"ĠCoh":47222,"owsky":47223,"Ġstarved":47224,"Ġsizeable":47225,"Ġ:-)":47226,"ĠmRNA":47227,"Ġsuspensions":47228,"istar":47229,"Scotland":47230,"Prin":47231,"------------------------------------------------":47232,"Ġ502":47233,"Ġteaspoons":47234,"Ġ1050":47235,"Ġcoercive":47236,"ĠMasonic":47237,"edded":47238,"ĠPassenger":47239,"Ġlatt":47240,"Ġbraces":47241,"ĠSteal":47242,"ĠNYT":47243,"ĠKats":47244,"ĠCelest":47245,"aez":47246,"Tu":47247,"ĠCoulter":47248,"ðŁĺ":47249,"Flickr":47250,"ĠWilmington":47251,"iths":47252,"++;":47253,"Ġvending":47254,"Ġnegro":47255,"ĠPhi":47256,"ĠYellowstone":47257,"Callback":47258,"Ġshampoo":47259,"ĠShades":47260,"wat":47261,"Ġsuperhuman":47262,"Ġridiculed":47263,"Ġholiest":47264,"ombo":47265,"Ġinterns":47266,"Ġhone":47267,"ĠParagu":47268,"URI":47269,"Ġdangling":47270,"ãĤ»":47271,"sov":47272,"ictional":47273,"availability":47274,"Ġrevocation":47275,"Ġdow":47276,"inic":47277,"ĠTHEIR":47278,"Ġiso":47279,"Ġoutings":47280,"ĠLethal":47281,"Ġ)))":47282,"Ġinaccur":47283,"Ġoutlandish":47284,"Ġanus":47285,"letico":47286,"idon":47287,"lol":47288,"Ġunregulated":47289,"Ġsuccumbed":47290,"Ġcuff":47291,"ĠWasteland":47292,"letal":47293,"Ġsubstr":47294,"Ġcoffers":47295,"Ġautomakers":47296,"ovi":47297,"ĠXue":47298,"ĠDaytona":47299,"Ġjarring":47300,"Ġfumes":47301,"Ġdisbanded":47302,"zik":47303,"itton":47304,"Ġstrikingly":47305,"Ġspores":47306,"Adapter":47307,".):":47308,"ĠLyndon":47309,"ivalry":47310,"Ġorally":47311,"Ġtumultuous":47312,"Ġdispleasure":47313,"Ġcones":47314,"orrect":47315,"Ġappease":47316,"Ġderby":47317,"ĠTripoli":47318,"ĠAless":47319,"Ġpoked":47320,"ĠGuilty":47321,"vP":47322,"Enough":47323,"Ġoriginals":47324,"699":47325,"Ġrabbi":47326,"Ġproverbial":47327,"Ġpostpone":47328,"elope":47329,"ĠMisty":47330,"Ġstaffed":47331,"ĠUnemployment":47332,"reditary":47333,"Ġdiligent":47334,"recomm":47335,"measures":47336,"asin":47337,"825":47338,"Ġponds":47339,"Ġmmol":47340,"ĠSAR":47341,"ĠCARE":47342,"Ġ371":47343,"Ġclenched":47344,"ĠCorsair":47345,"Ġcaricature":47346,"zn":47347,"attach":47348,"ĠSchro":47349,"speak":47350,"painted":47351,"ĠSuc":47352,"ĠENT":47353,"Ġcellul":47354,"ĠPaid":47355,"diagn":47356,"WHERE":47357,"Ġtexted":47358,"Barn":47359,"Ġretracted":47360,"ĠReferred":47361,"Sav":47362,"Ġupkeep":47363,"Ġworkplaces":47364,"ĠTokens":47365,"Ġamplify":47366,"clinical":47367,"Ġmultic":47368,"mberg":47369,"Ġconvoluted":47370,"Region":47371,"565":47372,"ĠTopic":47373,"Ġsnail":47374,"Ġsaline":47375,"Ġinsurrection":47376,"ĠPetr":47377,"forts":47378,"BAT":47379,"ĠNavajo":47380,"Ġrudimentary":47381,"ĠLaksh":47382,"ONDON":47383,"Measure":47384,"Ġtransformer":47385,"ĠGoddard":47386,"Ġcoincides":47387,"irin":47388,"Rex":47389,"ĠBok":47390,"quit":47391,"Ġshotguns":47392,"Ġproletarian":47393,"Ġscorp":47394,"ĠAda":47395,"514":47396,"Ġslander":47397,"recorded":47398,"Ġembell":47399,"risome":47400,"Ġapologizing":47401,"ĠMulcair":47402,"ĠGibraltar":47403,"Cla":47404,"Ġallot":47405,"ĠAttention":47406,"Ġ433":47407,"leave":47408,"Ġwhine":47409,"ĠIssa":47410,"ĠF
aust":47411,"ĠBarron":47412,"heny":47413,"Ġvictimized":47414,"Jews":47415,"Ġnurturing":47416,"ettel":47417,"Winged":47418,"ĠSubtle":47419,"Ġflavorful":47420,"ĠReps":47421,"enged":47422,"callback":47423,"Ġdirectional":47424,"Ġclasp":47425,"ĠDirections":47426,"planet":47427,"iculture":47428,"Helper":47429,"icion":47430,"acia":47431,"Ġç¥ŀ":47432,"Ġsurges":47433,"Ġcanoe":47434,"ĠPremiership":47435,"been":47436,"Ġdefied":47437,"ĠTrooper":47438,"Ġtripod":47439,"Ġgasp":47440,"ĠEuph":47441,"ĠAds":47442,"vernight":47443,"highly":47444,"Role":47445,"Ġentangled":47446,"ĠZeit":47447,"618":47448,"ĠRusty":47449,"Ġhavens":47450,"ĠVaughan":47451,"HAEL":47452,"ĠSERVICE":47453,"/,":47454,"Ġstricken":47455,"Ġdelusions":47456,"Ġbis":47457,"ĠHaf":47458,"Ġgratification":47459,"Ġenticing":47460,"UNCH":47461,"Adams":47462,"ĠOLED":47463,"ĠBeetle":47464,"Ġ1899":47465,"ĠSOFTWARE":47466,"ategor":47467,"VL":47468,"ĠTotem":47469,"ĠGators":47470,"ATURES":47471,"Ġimpedance":47472,"Registered":47473,"ĠCary":47474,"ĠAerial":47475,"onne":47476,"enium":47477,"Ġdred":47478,"ĠBeg":47479,"Ġconcurrently":47480,"Ġsuperpower":47481,"ĠXan":47482,"jew":47483,"imester":47484,"ĠDickinson":47485,"âĶģ":47486,"Fla":47487,"Ġpree":47488,"ĠRollins":47489,"©¶æ":47490,"Ġdenomination":47491,"ĠLana":47492,"516":47493,"Ġinciting":47494,"scribed":47495,"juries":47496,"ĠWonders":47497,"approximately":47498,"Ġsuspending":47499,"Ġmountainous":47500,"ĠLaugh":47501,"oidal":47502,"Ns":47503,"Detect":47504,")=":47505,"ĠLuthor":47506,"ĠSchwarzenegger":47507,"ĠMuller":47508,"ĠDevi":47509,"ecycle":47510,"Jar":47511,"613":47512,"ĠLongh":47513,"Bah":47514,"ĠSPORTS":47515,"nw":47516,"Ġrefinement":47517,"Ġwaterways":47518,"Ġdiner":47519,"Blade":47520,"683":47521,"Fac":47522,"Ġinitials":47523,"Ġrog":47524,"Ġparanormal":47525,"BUT":47526,"Ġ[(":47527,"ĠSwanson":47528,"ĠMesh":47529,"âĸ¬":47530,"Improve":47531,"ĠRadiation":47532,"ĠEsther":47533,"ĠEsk":47534,"ĠAly":47535,"iky":47536,"Ġirrad":47537,"ĠBuckingham":47538,"Ġrefill":47539,"Ġ._":47540,"Repe":47541,"CONCLUS":47542,"Ġdifferentiated":47543,"Ġchirop":47544,"ĠAtkins":47545,"Pattern":47546,"Ġexcise":47547,"Ġcabal":47548,"NSA":47549,"ĠSTA":47550,"ĠSIL":47551,"ĠParaly":47552,"Ġrye":47553,"ĠHowell":47554,"ĠCountdown":47555,"nesses":47556,"alysed":47557,"Ġresize":47558,"ãĤ½":47559,"Ġbudgetary":47560,"ĠStras":47561,"wang":47562,"Ġapiece":47563,"Ġprecincts":47564,"Ġpeach":47565,"Ġskyline":47566,"Ġ353":47567,"popular":47568,"Appearances":47569,"ĠMechanics":47570,"ĠDevOnline":47571,"Sullivan":47572,"Zen":47573,"Ġpu":47574,"opolis":47575,"544":47576,"Ġdeform":47577,"Ġcounteract":47578,"ĠLange":47579,"Ġ417":47580,"Console":47581,"774":47582,"Ġnodding":47583,"Ġpopulism":47584,"Ġhep":47585,"Ġcounselling":47586,"compliance":47587,"UFF":47588,"Ġundeniably":47589,"Ġrailing":47590,"ĠHorowitz":47591,"ĠSimone":47592,"ĠBungie":47593,"Ġak":47594,"ĠTalks":47595,"xff":47596,"flake":47597,"Crash":47598,"Ġsweaty":47599,"Ġbanquet":47600,"ĠOFFIC":47601,"Ġinventive":47602,"Ġastronomer":47603,"ĠStamford":47604,"ĠScare":47605,"ĠGREEN":47606,"olicited":47607,"Ġrusher":47608,"Ġcentrist":47609,"ighting":47610,"Ġsubclass":47611,"Ġdisav":47612,"Ġdefund":47613,"ĠNanto":47614,"ociate":47615,"mast":47616,"Ġpacif":47617,"Ġmend":47618,"eers":47619,"immigration":47620,"ESSION":47621,"Ġnumbering":47622,"Ġlaughable":47623,"ĠEnded":47624,"viation":47625,"emark":47626,"Pitt":47627,"Ġmeticulous":47628,"ĠLF":47629,"Ġcongratulated":47630,"ĠBirch":47631,"Ġswayed":47632,"Ġsemifinals":47633,"Ġhumankind":47634,"matter":47635,"ĠEquip":47636,"opausal":47637,"Said":
47638,"ĠLayout":47639,"Ġvoicing":47640,"Ġthug":47641,"Ġpornographic":47642,"IPS":47643,"Ġmoaning":47644,"Ġgrievance":47645,"Ġconfessions":47646,"escal":47647,"TEXTURE":47648,"Authent":47649,"osaurus":47650,"Purchase":47651,"Ġrelegation":47652,"alter":47653,"Ġ³³":47654,"Ġriddled":47655,"Ġogre":47656,"ĠLowell":47657,"Occup":47658,"Eat":47659,"ĠHyder":47660,"ĠAdviser":47661,"Commerce":47662,"Hunt":47663,"ĠOrth":47664,"ĠCompetitive":47665,"ĠCLA":47666,"CDC":47667,"Ġsalads":47668,"Fle":47669,"Ġindustrialized":47670,"`,":47671,"ĠOWN":47672,"Ġbeck":47673,"ĠParticularly":47674,"oubt":47675,"ĠmM":47676,"ĠHussain":47677,"ĠChennai":47678,"Ġ920":47679,"Ġappointing":47680,"ĠCullen":47681,",,,,,,,,":47682,"Ġpores":47683,"verified":47684,"Ġbiochemical":47685,"emate":47686,"Ġcowardly":47687,"ĠHelsinki":47688,"ĠEthiopian":47689,"SOURCE":47690,"ERC":47691,"estro":47692,"Ġbiotech":47693,"ĠSour":47694,"Ġbrewer":47695,"Bloomberg":47696,"Ġintensify":47697,"Glass":47698,"anco":47699,"ĠFDR":47700,"greSQL":47701,"ĠFires":47702,"©¶æ¥µ":47703,"eco":47704,"1001":47705,"ĠHomeless":47706,"Ġinstantaneous":47707,"ĠHaste":47708,"igel":47709,"Diamond":47710,"Ġpaving":47711,"Ġlandfill":47712,"Ġdads":47713,"houn":47714,":]":47715,"Ġincendiary":47716,"ĠLivingston":47717,"ĠHilbert":47718,"ĠChecks":47719,"styles":47720,"inators":47721,"ĠClive":47722,"phrine":47723,"Ġchimpanzees":47724,"Ġpall":47725,"ĠJM":47726,"ĠAadhaar":47727,"ðĿ":47728,"Ġachievable":47729,"disabled":47730,"PET":47731,"OOOOOOOO":47732,"Mot":47733,"Ġintangible":47734,"Ġballet":47735,"ĠWebs":47736,"ĠEstimated":47737,"Effects":47738,"Ġbailed":47739,"Joshua":47740,"Ġturbulence":47741,"Ġoccupant":47742,"ĠDaylight":47743,"Ġ361":47744,"meet":47745,"Ġstatically":47746,"Ġonlook":47747,"Ġki":47748,"illegal":47749,"Ġvelvet":47750,"Ġdehydration":47751,"Ġacquies":47752,"ĠRez":47753,"akura":47754,"ĠUpton":47755,"atro":47756,"Ġincomprehensible":47757,"Ġbackdoor":47758,"ĠRhino":47759,"727":47760,"Ġmaths":47761,")+":47762,"Ġheresy":47763,"Ġdf":47764,"ĠRoche":47765,"ĠLydia":47766,"Ġpancreat":47767,"reply":47768,"arrell":47769,"Ġsolicitation":47770,"Ġcircadian":47771,"BIP":47772,"Ġforay":47773,"Ġcryptic":47774,"izu":47775,"imeo":47776,"ĠTomato":47777,"ĠHoms":47778,"examination":47779,"Ġquarry":47780,"ĠValiant":47781,"ĠJericho":47782,"ĠINCLUD":47783,"Ġ1840":47784,"519":47785,"Ġresists":47786,"Ġsnapshots":47787,"ĠSpur":47788,"ĠAntiqu":47789,"Login":47790,"Ġbestselling":47791,"Ġantic":47792,"ĠSutherland":47793,"ãĤ¢ãĥ«":47794,"Ġ~/":47795,"ĠParm":47796,"èĥ":47797,"Pages":47798,"intensity":47799,"Ġimmobil":47800,"Ġ1865":47801,"zzo":47802,"Ġnifty":47803,"Ġfentanyl":47804,"ĠPreservation":47805,"ophen":47806,"Ġdarts":47807,"ĠDinosaur":47808,"pointers":47809,"ĠRite":47810,"suggest":47811,"awareness":47812,"ĠSheridan":47813,"Ġstances":47814,"Ġsorcery":47815,"Ġperjury":47816,"ĠNikola":47817,"iever":47818,"Ġfiance":47819,"ĠJordanian":47820,"ĠBalloon":47821,"Ġnab":47822,"Ġkb":47823,"Ġhumanities":47824,"ĠTanaka":47825,"hillary":47826,"Ġconsultancy":47827,"ĠZub":47828,"Ġremission":47829,"Ġconfid":47830,"CHQ":47831,"ĠFug":47832,"Ġimprovis":47833,"Yep":47834,"/_":47835,"Ġunwillingness":47836,"Ġportfolios":47837,"055":47838,"ĠInstructor":47839,"aiman":47840,"Ġclaimants":47841,"Mbps":47842,"ĠBye":47843,"received":47844,"Tweet":47845,"Ġindemn":47846,"riz":47847,"amara":47848,"Nat":47849,"Ġevaluates":47850,"ĠLur":47851,"epad":47852,"FOX":47853,"ĠThro":47854,"Ġrusty":47855,"Ġbedrock":47856,"ĠOprah":47857,"JB":47858,"Ġmanipulative":47859,"Ġwillful":47860,"Ġrelapse":47861,"Ġextant":47862,"Theme":47863,"S
ensor":47864,"ĠStability":47865,"govern":47866,"Ġpoppy":47867,"Ġknack":47868,"Ġinsulated":47869,"ĠTile":47870,"ĠExtrem":47871,"Ġuntold":47872,"Ġconverge":47873,"Ġrefuel":47874,"igroup":47875,"Ġdistortions":47876,"Ġravaged":47877,"Ġmechanically":47878,"ĠReilly":47879,"ĠNose":47880,"ĠIncarnation":47881,"ĠBecky":47882,"abbling":47883,"Ġtaco":47884,"Ġrake":47885,"Ġmelancholy":47886,"Ġillustrious":47887,"ĠDartmouth":47888,"Guide":47889,"ĠRazer":47890,"ĠBenz":47891,"Ultimate":47892,"ĠSurprise":47893,"Ġpageant":47894,"offer":47895,"Whoever":47896,"Ġwiser":47897,"Ġchemist":47898,"ĠHELL":47899,"ĠBulk":47900,"Ġplutonium":47901,"ĠCOVER":47902,"Ö¼":47903,"failed":47904,"Ġtirelessly":47905,"Ġinfertility":47906,"ĠTrident":47907,"ĠShowtime":47908,"ĠCiv":47909,"Vice":47910,"requires":47911,"ittance":47912,"Ġuncontrolled":47913,"interesting":47914,"561":47915,"Ġinnovate":47916,"ategic":47917,"Lie":47918,"ĠSelling":47919,"Ul":47920,"Ġsavior":47921,"ĠTosh":47922,"Ġswast":47923,"PASS":47924,"Ġrink":47925,"Ġcardio":47926,"ĠIro":47927,"udi":47928,"Ġvantage":47929,"Ġvans":47930,"ĠNiño":47931,"+=":47932,"Ġpropagate":47933,"":49029,"Ġleukemia":49030,"Ġeluc":49031,"Ġannouncer":49032,"ĠLithuan":49033,"ĠArmageddon":49034,"åĩ":49035,"Lenin":49036,"ĠRuk":49037,"Ġpepp":49038,"ĠRomantic":49039,"ĠPIT":49040,"ĠInterstellar":49041,"ĠAtkinson":49042,"Raid":49043,"Js":49044,"Goal":49045,"Course":49046,"Ġvanishing":49047,"esley":49048,"ĠRounds":49049,"Elsa":49050,"593":49051,"Ġredundancy":49052,"ĠSTAND":49053,"Ġprophetic":49054,"Ġhabitable":49055,"ryu":49056,"Ġfaintly":49057,"MODE":49058,"Ġflanked":49059,"IRC":49060,"Awesome":49061,"Ġspurious":49062,"ĠZah":49063,"ĠMSG":49064,"Ġshading":49065,"Ġmotivational":49066,"ĠSantana":49067,"ĠSPR":49068,"Ġexcruciating":49069,"omial":49070,"ĠMiko":49071,"ĠLeopard":49072,"Abyss":49073,"Ġ[|":49074,"dirty":49075,"Ġbaths":49076,"Ġdemoral":49077,"andre":49078,"PB":49079,"Ġunification":49080,"Ġsacrament":49081,"Ġ[&":49082,"Ġpriceless":49083,"Ġgelatin":49084,"Ġemanating":49085,"ĠAllaah":49086,"986":49087,"Ġoutburst":49088,"Ġeras":49089,"ĠXVI":49090,"ĠSPI":49091,"Ott":49092,"ĠLazarus":49093,"PLIED":49094,"Flying":49095,"blogs":49096,"Wisconsin":49097,"Raven":49098,"Ġrebate":49099,"Ġcreeps":49100,"ĠSpan":49101,"ĠPainter":49102,"ĠKira":49103,"ĠAmos":49104,"ĠCorvette":49105,"Consumer":49106,"ĠRecover":49107,"cki":49108,"Ġpesky":49109,"ĠInvention":49110,"Companies":49111,"Ġchallengers":49112,"ademic":49113,"ĠUkrainians":49114,"ĠNeurolog":49115,"ĠForsaken":49116,"Ġentrants":49117,"Ġembattled":49118,"Ġdefunct":49119,"ĠGlacier":49120,"Ġpoisons":49121,"ĠHorses":49122,"makes":49123,"ĠDirt":49124,"Ġ423":49125,"hhh":49126,"ĠTransformation":49127,"QUIRE":49128,"..................":49129,"Ġtraveller":49130,"ĠSexy":49131,"ĠKern":49132,"ipolar":49133,"Ġransomware":49134,"oooooooooooooooo":49135,"Ec":49136,"ruby":49137,"Professional":49138,"ĠOutbreak":49139,"argument":49140,"Grey":49141,"ĠFifa":49142,"ĠCHO":49143,"ĠFORM":49144,"ĠAmtrak":49145,"-[":49146,"Ġcradle":49147,"Ġantioxidants":49148,"ãģ®å®":49149,"736":49150,"ĠNASL":49151,"ĠContributions":49152,"Indiana":49153,"ĠSTEP":49154,"CSS":49155,"Ġsalient":49156,"Ġallocations":49157,"yrights":49158,"Ġmashed":49159,"ĠCutter":49160,"Sexual":49161,"Ġpounded":49162,"Ġfanbase":49163,"Ġcasc":49164,"ĠTransparency":49165,"Ġanalytic":49166,"ĠSummoner":49167,"×ŀ":49168,"ĠADC":49169,"detail":49170,"Ġvanquished":49171,"Ġcrabs":49172,"arie":49173,"Destroy":49174,"ĠSack":49175,"Ġtransistor":49176,"Alabama":49177,"ĠKoen":49178,"ĠFisheries":49179,"cone":49180,"Ġannexed":49181,"ĠMG
M":49182,"esa":49183,"Ġfaked":49184,"ĠCongratulations":49185,"Ġhindered":49186,"Ġcorrectional":49187,"ĠITV":49188,"leeve":49189,"Ġinappropriately":49190,"licks":49191,"Ġtrespass":49192,"Ġpaws":49193,"Ġnegotiator":49194,"ĠChristensen":49195,"limits":49196,"ĠDianne":49197,"Ġelegance":49198,"ĠContracts":49199,"anke":49200,"Obj":49201,"Ġvigilance":49202,"Ġcastles":49203,"ĠNAD":49204,"ĠHolo":49205,"Ġemphatically":49206,"ĠTitus":49207,"ĠServing":49208,"ĠRichie":49209,"ĠPigs":49210,"568":49211,"Ġanimosity":49212,"ĠAttributes":49213,"ĠUriel":49214,"MQ":49215,"myra":49216,"ĠApplicant":49217,"Ġpsychiatrists":49218,"ĠVij":49219,"ĠAbby":49220,"agree":49221,"Push":49222,"ĠkWh":49223,"hiba":49224,"Ġincite":49225,"ĠWeasley":49226,"ĠTaxi":49227,"ministic":49228,"hyper":49229,"ĠFarn":49230,"Ġ601":49231,"ĠNationwide":49232,"Fake":49233,"952":49234,"Ġmaize":49235,"Ġinteracted":49236,"Ġtransitioned":49237,"Ġparasitic":49238,"Ġharmonic":49239,"Ġdecaying":49240,"Ġbaseless":49241,"nsics":49242,"Ġtranspired":49243,"Ġabundantly":49244,"ĠForensic":49245,"Ġtreadmill":49246,"ĠJav":49247,"aband":49248,"Ġsshd":49249,"Ġfrontman":49250,"ĠJakarta":49251,"oller":49252,"drops":49253,"ĠSERVICES":49254,"romptu":49255,"ophical":49256,"hospital":49257,"bledon":49258,"645":49259,"Ġmidrange":49260,"ĠEVENT":49261,"culated":49262,"rawled":49263,"Ġperched":49264,"Ġoverboard":49265,"ĠPeel":49266,"ĠPwr":49267,"ĠCarth":49268,"ĠCOMPLE":49269,"coe":49270,"shall":49271,"Ġdeterrence":49272,"METHOD":49273,"ĠAbsent":49274,"MEN":49275,"Ġsill":49276,"ĠLEVEL":49277,"York":49278,"Ġsinners":49279,"ĠOPEC":49280,"ĠNur":49281,"ĠDesigns":49282,"selection":49283,"Ġunworthy":49284,"CHA":49285,"Ġstrengthens":49286,"883":49287,"edly":49288,"Ġslicing":49289,"Ġmalnutrition":49290,"Ġfilmmaking":49291,"ĠPolk":49292,"urated":49293,"Ġ421":49294,"breakers":49295,"!'\"":49296,"Ġwetlands":49297,"ĠDiscrimination":49298,"Ġallowable":49299,"Ġsteered":49300,"ĠSicily":49301,"SAM":49302,"Ġmustache":49303,"Ġmids":49304,"Ġclipped":49305,"Ġcirculate":49306,"Ġbrittle":49307,"ĠBuildings":49308,"raised":49309,"ĠRoundup":49310,"Ġwealthier":49311,"Ġoverwrite":49312,"Ġoverpowered":49313,"ĠGerrard":49314,"sites":49315,"PDATED":49316,"Ġacutely":49317,"ĠGamble":49318,"Ġpim":49319,"ĠKus":49320,"Typically":49321,"Deploy":49322,"ĠMoroccan":49323,"potion":49324,"combe":49325,"Ġvigilante":49326,"Ġ363":49327,"Stew":49328,"ĠBagg":49329,"Ġresided":49330,"ĠSpo":49331,"Ġremnant":49332,"Ġemptiness":49333,"brainer":49334,"Ġoutpatient":49335,"priority":49336,"Ġleptin":49337,"ĠPayton":49338,"ĠGleaming":49339,"ĠShed":49340,"ĠPolo":49341,"ĠMormonism":49342,"restricted":49343,"arlane":49344,"wx":49345,"Ġcreatine":49346,"ĠAnon":49347,"ĠSTUD":49348,"ĠJUL":49349,"ĠTee":49350,"528":49351,"089":49352,"Ġhatched":49353,"Dispatch":49354,"ĠComposite":49355,"Ġ451":49356,"puff":49357,"ĠXCOM":49358,"ĠOrn":49359,"ĠTHANK":49360,"ENDED":49361,"ĠAsheville":49362,"ĠÃľ":49363,"Ġmango":49364,"ĠSlightly":49365,"worldly":49366,"ĠWander":49367,"ĠExpand":49368,"ĠChr":49369,"Mist":49370,"Ġorthodoxy":49371,"ĠUNESCO":49372,"regate":49373,"Elsewhere":49374,"kie":49375,"irled":49376,"Ġtopple":49377,"Ġadoptive":49378,"ĠLegs":49379,"dress":49380,"ĠSagan":49381,"bare":49382,"ĠGlou":49383,"Crunch":49384,"Ġhelpers":49385,"Ġchronically":49386,"ĠHuma":49387,"10000":49388,"Ġaccommodating":49389,"äºĶ":49390,"Ġwrinkles":49391,"Ġdodged":49392,"fourth":49393,"Ġprecon":49394,"Ġcompressor":49395,"ĠKare":49396,"Ġevict":49397,"ĠWarwick":49398,"imar":49399,"Ġmodernization":49400,"Ġbandwagon":49401,"Ġrefuted":49402,"Ġnetted":49403,"ĠNaples":49
404,"ĠGenie":49405,"perors":49406,"Ġfielded":49407,"Ġdere":49408,"ĠParables":49409,"lees":49410,"Ġtrout":49411,"aspers":49412,"Ġnihil":49413,"Ġhappiest":49414,"Ġfloppy":49415,"ĠLoft":49416,"ĠHeard":49417,"Ġunison":49418,"Ġlug":49419,"ĠRedmond":49420,"classic":49421,"Supporters":49422,"SHIP":49423,"GMT":49424,"Ġfuelled":49425,"çIJ":49426,"Ġdd":49427,"ĠEminem":49428,"Ġ1897":49429,"NYSE":49430,"Ġsecretaries":49431,"ĠFIA":49432,"ĠCanaveral":49433,"Favorite":49434,"Ġpomp":49435,"Ġdetainee":49436,"ership":49437,"aimon":49438,"iour":49439,"ĠApex":49440,"Ġplantations":49441,"amia":49442,"acion":49443,"Rust":49444,"Ġtowed":49445,"ĠTruly":49446,"577":49447,"Ġsheltered":49448,"rider":49449,"Wo":49450,"Ġlair":49451,"ĠIntelligent":49452,"improve":49453,"matically":49454,"Ġetiquette":49455,"adra":49456,"allo":49457,"ĠJuno":49458,"anything":49459,"ĠStruggle":49460,"ĠPredict":49461,"ĠGrimes":49462,"ĠAMERICA":49463,"ctx":49464,"ĠSituation":49465,"WOOD":49466,"Ġsoluble":49467,"meier":49468,"Ġintolerable":49469,"angering":49470,"Ġuninterrupted":49471,"Ġtooltip":49472,"Ġinterrogated":49473,"Ġgunned":49474,"ĠSneak":49475,"æѦ":49476,"Ġtether":49477,"Ġcrumble":49478,"Lens":49479,"Ġclustered":49480,"ĠSyl":49481,"ĠHasan":49482,"Ġdystopian":49483,"wana":49484,"Ġjoystick":49485,"ĠThib":49486,"ammu":49487,"Tomorrow":49488,"546":49489,"Ġovercame":49490,"Ġminimized":49491,"ceptor":49492,"Runner":49493,"ENGTH":49494,"ĠBrenda":49495,"ĠAchievements":49496,"Ġtorches":49497,"Ġrapport":49498,"ĠInvestigator":49499,"ĠHandling":49500,"relation":49501,"grey":49502,"815":49503,"Ġkcal":49504,"ĠCommands":49505,"dq":49506,"Ġcurls":49507,"Ġbearer":49508,"Ġcynicism":49509,"itri":49510,"ĠUseful":49511,"Bee":49512,"DCS":49513,"Ġabras":49514,"Pract":49515,"BILITIES":49516,"712":49517,"Ġdebugger":49518,"Ġdebtor":49519,"ĠLia":49520,"ĠKers":49521,"Ġexacerbate":49522,"ĠStacy":49523,"ĠBland":49524,"ĠScenes":49525,"Ġbranching":49526,"âĸĪâĸĪâĸĪâĸĪâĸĪâĸĪâĸĪâĸĪ":49527,"apeake":49528,"Ġsalsa":49529,"Ġmishand":49530,"ĠKonami":49531,"ĠNib":49532,"Ġanecdote":49533,"Ġagreeable":49534,"Ïī":49535,"ĠNathaniel":49536,"ĠHeisman":49537,"ĠBeware":49538,"Ġ1886":49539,"spective":49540,"691":49541,"522":49542,"Ġinhibits":49543,"Ġhashing":49544,"Ġ1889":49545,"å°Ĩ":49546,"vich":49547,"Pure":49548,"Ġsolidly":49549,"Ġaspirin":49550,"imaru":49551,"Ġstreetcar":49552,"ĠUCS":49553,"ĠJudd":49554,"Ġflashbacks":49555,"pins":49556,"Ġ1440":49557,"ĠUNHCR":49558,"ĠSymptoms":49559,"TIT":49560,"538":49561,"Fra":49562,"%);":49563,"Ġooz":49564,"Ġcurfew":49565,"Ġcalmed":49566,"Ġparticipates":49567,"TeX":49568,"Ġnonsensical":49569,"Ġfullback":49570,"ĠDeL":49571,"monkey":49572,"hari":49573,"Ġmetabolites":49574,"Ġlooted":49575,"ĠALWAYS":49576,"ĠBCC":49577,"Lt":49578,"ochet":49579,"Bone":49580,"Ġvetoed":49581,"Ġgcc":49582,"ĠCLICK":49583,"Ġ1888":49584,"saf":49585,"Ġstiffness":49586,"Ġlowly":49587,"ĠGeh":49588,"verson":49589,"orset":49590,"Ġunforeseen":49591,"Ġanesthesia":49592,"ĠOptical":49593,"Ġreconstructed":49594,"ĠTup":49595,"shows":49596,"NEWS":49597,"ĠNewspaper":49598,"ĠASA":49599,"tera":49600,"Numbers":49601,"Ġinexplicable":49602,"×ij":49603,"Ġhardness":49604,"untarily":49605,"ĠAcer":49606,"gradient":49607,"ARDIS":49608,"Ġwoodland":49609,"Ġmetaphors":49610,"ĠWembley":49611,"ĠPavel":49612,"philis":49613,"Ġrewriting":49614,"Ġperceptual":49615,"Ġ1070":49616,"worms":49617,"ĠDowns":49618,"Ġunsurprisingly":49619,"Ġtagging":49620,"flame":49621,"Ġlitres":49622,"Ġbounces":49623,"ĠBabe":49624,"shut":49625,"Ġoverdoses":49626,"ĠSheila":49627,"ĠChau":49628,"ĠBless":49629,"Capture":49630,"ĠSig
nificant":49631,"ĠScion":49632,"Ġ389":49633,"ĠMcH":49634,"ĠTitanium":49635,"ĠMeal":49636,"ameda":49637,"agents":49638,"aggressive":49639,"Billy":49640,"763":49641,"ĠSaying":49642,"DERR":49643,"itone":49644,"Collins":49645,"Bound":49646,"Ġbolted":49647,"ĠDMCA":49648,"953":49649,"Ġuniqueness":49650,"Ġepigen":49651,"unci":49652,"antam":49653,"Ġreckoning":49654,"chairs":49655,"OGR":49656,"ĠSenegal":49657,"Ġ1862":49658,"relevant":49659,"Ġ¯":49660,"Ġpharmacies":49661,"ĠGeral":49662,"vier":49663,"Yan":49664,"ORPG":49665,"Ġrabid":49666,"bending":49667,"ĠUNITED":49668,"Ġ465":49669,"Assembly":49670,"Ġweep":49671,"Ġbehest":49672,"ĠMothers":49673,"ĠJace":49674,"hid":49675,"Ġwhirlwind":49676,"ĠUNIVERS":49677,"Ġutopian":49678,"Ġkidnap":49679,"Philipp":49680,"Kin":49681,"893":49682,"Ġlivestream":49683,"ĠMISS":49684,"Ġsubversive":49685,"ĠTechniques":49686,"ĠJUSTICE":49687,"ĠBASE":49688,"Ġ387":49689,"Ġassailants":49690,"ĠHardcore":49691,"Ġsprinkled":49692,"ĠPse":49693,"éļ":49694,"printed":49695,"ĠHau":49696,"ORGE":49697,"ĠTOUR":49698,"Ġlaced":49699,"Ġitch":49700,"Giving":49701,"Ġported":49702,"781":49703,"////////////////////////////////":49704,"breeding":49705,"Ġlogger":49706,"ĠHOL":49707,"innie":49708,"Firstly":49709,"Ġembryonic":49710,"Ġdelegated":49711,"pai":49712,"OIL":49713,"Ġcentrally":49714,"ĠRx":49715,"ĠScouting":49716,"Dutch":49717,"Ġhereditary":49718,"ĠCruiser":49719,"sat":49720,"529":49721,"ĠMarriott":49722,"othermal":49723,"Ġprohibitions":49724,"Earn":49725,"ĠStab":49726,"ĠColleges":49727,"ĠBelief":49728,"stretched":49729,"ĠLH":49730,"ĠEntityItem":49731,"CIA":49732,"Ġunrem":49733,"Ġlaureate":49734,"Ġdenominations":49735,"summary":49736,"hler":49737,"Spect":49738,"ĠKlaus":49739,"ĠBeans":49740,"Ġinsur":49741,"ĠPAX":49742,"Ġfielder":49743,"ĠVet":49744,"ĠSparrow":49745,"zie":49746,"ĠSQ":49747,"ĠMondays":49748,"ĠOffline":49749,"ĠLerner":49750,"ĠExtensions":49751,"Ireland":49752,"Ġpatronage":49753,"Ġcontrasted":49754,"ĠMania":49755,"hirt":49756,"Moscow":49757,"Ġcondemns":49758,"ĠAnge":49759,"Ġcomposing":49760,"ĠPepe":49761,"ĠPaddock":49762,"Ġheterogeneity":49763,"Ġideologically":49764,"Ġfishes":49765,"Ġcursing":49766,"ĠRutherford":49767,"ĠFloating":49768,"ĠAmelia":49769,"Tea":49770,"Synopsis":49771,"Ġstunts":49772,"Ġbead":49773,"Ġstocking":49774,"ĠMILL":49775,"obook":49776,"massive":49777,"\\<":49778,"Ġhump":49779,"ĠPreferences":49780,"EngineDebug":49781,"geist":49782,"ĠNieto":49783,"omever":49784,"ishy":49785,"evaluate":49786,"colonial":49787,"Alternative":49788,"ĠGoPro":49789,"ĠVortex":49790,"ĠNETWORK":49791,"ansky":49792,"Secure":49793,"ĠThrust":49794,"Snake":49795,"Ġparcels":49796,"Ġsamurai":49797,"Ġactresses":49798,"Nap":49799,"MF":49800,"iferation":49801,"Beer":49802,"523":49803,"ĠIly":49804,"ointment":49805,"Ping":49806,"Ġstriped":49807,"ĠMellon":49808,"ossession":49809,"Ġneutron":49810,"endium":49811,"Ġaph":49812,"ĠFlavoring":49813,"Ġ383":49814,"Ġresponsiveness":49815,"ĠJindal":49816,"ĠHitchcock":49817,"Denver":49818,"ĠDRAGON":49819,"smanship":49820,"ĠDupl":49821,"Ġsly":49822,"Ġwebcam":49823,"ĠTwain":49824,"ĠDarling":49825,"iliate":49826,"consumer":49827,"DIT":49828,"Ġnamesake":49829,"Ġunorthodox":49830,"Ġfuner":49831,"ĠPLoS":49832,"ĠCONTROL":49833,"ozyg":49834,"oglobin":49835,"FACE":49836,"ERG":49837,"ĠDia":49838,"ĠFiesta":49839,"cele":49840,"034":49841,"Ġenclave":49842,"âĸ¬âĸ¬":49843,"onement":49844,"alist":49845,"Mand":49846,"Ġhomegrown":49847,"ĠFancy":49848,"Ġconceptions":49849,"ĠContains":49850,"ureen":49851,"Ġreiterate":49852,"Ġmeager":49853,"Ġinstallments":49854,"Spawn":49855,"627":498
56,"Ġphotoc":49857,"ĠCabrera":49858,"ĠRosenthal":49859,"ĠLansing":49860,"isner":49861,"Ġinvests":49862,"ĠUFOs":49863,"EXP":49864,"Hardware":49865,"Ġtragically":49866,"Ġconcedes":49867,"ieft":49868,"cham":49869,"borgh":49870,"ĠSchr":49871,"ĠMelanie":49872,"ĠHoy":49873,"Ġvisitation":49874,"Ġidiosyncr":49875,"Ġfractions":49876,"Ġforeskin":49877,"obos":49878,"Ġpoaching":49879,"ĠVIEW":49880,"Ġstimulates":49881,"ĠGork":49882,"canon":49883,"MIC":49884,"ĠNemesis":49885,"ĠIndra":49886,"ĠDMV":49887,"Ġ529":49888,"Ġinspecting":49889,"Ġgrandma":49890,"ĠWhedon":49891,"ĠShant":49892,"ĠPurg":49893,"ikan":49894,"ĠTeg":49895,"ĠCLR":49896,"zac":49897,"Victoria":49898,"ĠVerify":49899,"ionics":49900,"Ġpartying":49901,"ĠMou":49902,"colour":49903,"Ġtestimonies":49904,"lations":49905,"Ġpressuring":49906,"hiro":49907,"acers":49908,"Ġfid":49909,"angler":49910,"ĠCSI":49911,"Ġhereafter":49912,"Ġdissidents":49913,"reporting":49914,"iphany":49915,"chev":49916,"Ġsolitude":49917,"Ġlobe":49918,"Ġindis":49919,"Ġcredential":49920,"recent":49921,"adult":49922,"ĠNirvana":49923,"ĠFranchise":49924,"Layer":49925,"Hyp":49926,"ĠBerkshire":49927,"Ġwills":49928,"tif":49929,"Ġtotem":49930,"ĠJudah":49931,"repair":49932,"Instant":49933,"548":49934,"Ġembassies":49935,"Ġbottleneck":49936,"Ġbount":49937,"Ġtypew":49938,"ĠAlvin":49939,"jing":49940,"imilar":49941,"Rush":49942,"Ġbrim":49943,"ĠHELP":49944,"Aim":49945,"]'":49946,"Ġpassively":49947,"Ġbounded":49948,"ĠRated":49949,"Ġcriminality":49950,"Ġbiomark":49951,"Ġdispatcher":49952,"ĠTowards":49953,"Ġ+++":49954,"righteous":49955,"frog":49956,"ĠPanc":49957,"Carter":49958,"032":49959,"æ©Ł":49960,"Ġultraviolet":49961,"ĠLicensed":49962,"ĠTata":49963,"ĠBlessing":49964,"ĠGAM":49965,"Ġchemically":49966,"ĠSeaf":49967,"ĠRELE":49968,"ĠMercenary":49969,"capitalist":49970,"Ġformulations":49971,"Ġannihilation":49972,"ĠVerb":49973,"ĠArgon":49974,"Ġunloaded":49975,"Ġmorphed":49976,"Ġconquering":49977,"backer":49978,"IELD":49979,"Ġthefts":49980,"Ġfrontrunner":49981,"ĠRoyale":49982,"ĠFundamental":49983,"elight":49984,"Chip":49985,"necessary":49986,"ayn":49987,"ĠSlip":49988,"Ġ448":49989,"cerned":49990,"Pause":49991,"Ġshockingly":49992,"ĠABV":49993,"Ġcomposure":49994,"733":49995,"ĠMotorsport":49996,"ahime":49997,"Murray":49998,"Mach":49999,"Ġgrids":50000,"Ġdebian":50001,"Ġfurthermore":50002,"Ġdexterity":50003,"ĠCollections":50004,"oslov":50005,"ilage":50006,"bj":50007,"ĠMonteneg":50008,"ĠstrutConnector":50009,"Ġmassacres":50010,"Ġbriefs":50011,"fetched":50012,"uvian":50013,"olition":50014,"Failure":50015,"emonic":50016,"Ġflared":50017,"Ġclaimant":50018,"Ġcures":50019,"Ġgiveaways":50020,"ĠSubstance":50021,"alions":50022,"Ġcringe":50023,"ĠKul":50024,"Ġaristocracy":50025,"ĠUlster":50026,"olated":50027,"housing":50028,"ĠMIS":50029,"Ġglared":50030,"ĠWilhelm":50031,"needs":50032,"lambda":50033,"builders":50034,"ĠVIS":50035,"Ġradiator":50036,"ĠGhostbusters":50037,"Ġ436":50038,"actual":50039,"Ġherds":50040,"ça":50041,"watching":50042,"Ġcountering":50043,"Charge":50044,"Ġcharred":50045,"Ġwarheads":50046,"Ġiodine":50047,"ĠMacy":50048,"041":50049,"Ġdepartures":50050,"ĠSins":50051,"Ġdyed":50052,"ĠConcepts":50053,"gado":50054,"713":50055,"Ġquotations":50056,"Ġgist":50057,"ĠChristy":50058,"Ġantigen":50059,"ĠHemp":50060,"ĠDrawn":50061,"ĠBarg":50062,"ezvous":50063,"Ġpaternity":50064,"Ġardu":50065,"ĠAnchorage":50066,"ĠRik":50067,"Ġoverloaded":50068,"ĠUsername":50069,"ĠTammy":50070,"ĠNau":50071,"ĠCellular":50072,"Ġwaning":50073,"Ġrodent":50074,"ĠWorcester":50075,"ilts":50076,"ĠTad":50077,"Ġdwellings":50078,"Ġbullish":50079,"431
":50080,"Ġretaliate":50081,"Ġmigraine":50082,"ĠChevron":50083,"CHECK":50084,"Ġdonkey":50085,"crim":50086,"SPA":50087,"ĠAnalog":50088,"Ġmarquee":50089,"ĠHaas":50090,"Bir":50091,"ĠGDDR":50092,"ĠDownloads":50093,"Ġwillpower":50094,"ĠForth":50095,"ĠRecorded":50096,"Ġimpossibility":50097,"ĠLogged":50098,"ĠFranks":50099,"ĠRatt":50100,"initions":50101,"Ġcleaners":50102,"Ġsorely":50103,"Ġflickering":50104,"ĠExamination":50105,"catching":50106,"alloween":50107,"Msg":50108,"Ġdunno":50109,"Fa":50110,"Ġdysph":50111,"crazy":50112,".''.":50113,"Ġmainline":50114,"Ġcs":50115,"Ġptr":50116,"ĠWally":50117,"igun":50118,"951":50119,"ĠBigfoot":50120,"fights":50121,"Ġretrieving":50122,"Jr":50123,"Ġduplication":50124,"ĠExplan":50125,"Ġrelational":50126,"Ġquaint":50127,"Ġbiscuits":50128,"Ġado":50129,"Ġshudder":50130,"Ġantidote":50131,"blooded":50132,"ksh":50133,"Ġsauces":50134,"Ġreinvest":50135,"Ġdispensary":50136,"ĠDiver":50137,"Ġ9000":50138,"student":50139,"Ġinsepar":50140,"escap":50141,"Ġtoddlers":50142,"ĠGPIO":50143,"ĠAssignment":50144,"headers":50145,"Ġlackluster":50146,"Ġaback":50147,"956":50148,"Ġtoolbar":50149,"745":50150,"Ġoust":50151,"Ġcontemplation":50152,"ĠPRESIDENT":50153,"Ġ458":50154,"======":50155,"Ġguaranteeing":50156,"ĠHeist":50157,"ĠCannes":50158,"Ļ½":50159,"Ġcollaborator":50160,"ĠAmp":50161,"Ġgou":50162,"ĠSHALL":50163,"stories":50164,"783":50165,"Ġmobilized":50166,"Ġbrood":50167,"ĠLU":50168,"ĠðŁij":50169,"Ġrefin":50170,"ĠAnthropology":50171,"vind":50172,"illi":50173,"Ġwarranties":50174,"ĠBabel":50175,"Ġswath":50176,"Ġcaches":50177,"Ġantagonists":50178,"artifacts":50179,"Ġhotly":50180,"ĠStarts":50181,"ĠGö":50182,"zag":50183,"!!!!!":50184,"Ġscourge":50185,"Ġconspiring":50186,"ruits":50187,"reverse":50188,"ĠSheen":50189,"ĠJesuit":50190,"ĠGiovanni":50191,"adies":50192,"Ġbuttocks":50193,"earcher":50194,"acan":50195,"Ġvolleyball":50196,"Ġshrouded":50197,"Ġscoreboard":50198,"bats":50199,"ĠIPM":50200,"Ġasses":50201,"Ġderegulation":50202,"ĠTelegram":50203,"ĠReboot":50204,"Ġ7000":50205,"ĠCanary":50206,"Ġkernels":50207,"ĠFrançois":50208,"ĠDuff":50209,"ĠPon":50210,"ĠLeica":50211,"ĠGarmin":50212,"Ġorphans":50213,"ĠClaudia":50214,"Ġcalendars":50215,"ĠLeilan":50216,"ento":50217,"Rocket":50218,"Ġbrunch":50219,"ĠHawking":50220,"ainers":50221,"Ġsensibilities":50222,"ĠkW":50223,"ĠKand":50224,"Ġreclaimed":50225,"Ġinterestingly":50226,"ש":50227,"romy":50228,"JM":50229,"ĠEnhancement":50230,"bush":50231,"Skip":50232,"Ġrappers":50233,"Ġgazing":50234,"pedia":50235,"athlon":50236,"Revolution":50237,"Ġsnipers":50238,"Ġreverted":50239,"Ġconglomerate":50240,"Terry":50241,"794":50242,"Ġharsher":50243,"Ġdesolate":50244,"ĠHitman":50245,"Commission":50246,"Ġ(/":50247,"âĢ¦.\"":50248,"Compar":50249,"Ġamplification":50250,"ominated":50251,"Ġregress":50252,"ĠCollider":50253,"Ġinformants":50254,"Ġgazed":50255,"<|endoftext|>":50256} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e40ddde7a04b51faaf86f99bcb80caeeb69a4959 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,144 @@ +accelerate==0.20.3 +aiofiles==23.2.1 +aiohttp==3.9.1 +aiosignal==1.3.1 +altair==5.2.0 +annotated-types==0.6.0 +antlr4-python3-runtime==4.9.3 +anyio==3.7.1 +appdirs==1.4.4 +asttokens==2.4.1 +async-timeout==4.0.3 +attrs==23.1.0 +bitsandbytes==0.37.0 +braceexpand==0.1.7 +certifi==2023.11.17 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.28.1 +comm==0.2.0 +contourpy==1.2.0 +cycler==0.12.1 +datasets==2.15.0 +debugpy==1.8.0 +decorator==5.1.1 +dill==0.3.7 
+docker-pycreds==0.4.0 +einops==0.7.0 +exceptiongroup==1.2.0 +executing==2.0.1 +fastapi==0.105.0 +ffmpy==0.3.1 +filelock==3.13.1 +fonttools==4.46.0 +frozenlist==1.4.1 +fsspec==2023.10.0 +gitdb==4.0.11 +gitpython==3.1.40 +gradio==3.47.1 +gradio-client==0.6.0 +h11==0.14.0 +httpcore==1.0.2 +httpx==0.25.2 +huggingface-hub==0.19.4 +idna==3.6 +imageio==2.33.1 +importlib-metadata==7.0.0 +importlib-resources==6.1.1 +iopath==0.1.10 +ipykernel==6.27.1 +ipython==8.18.1 +jedi==0.19.1 +jinja2==3.1.2 +joblib==1.3.2 +jsonschema==4.20.0 +jsonschema-specifications==2023.11.2 +jupyter-client==8.6.0 +jupyter-core==5.5.1 +kiwisolver==1.4.5 +lazy-loader==0.3 +lit==17.0.6 +markupsafe==2.1.3 +matplotlib==3.7.0 +matplotlib-inline==0.1.6 +mpmath==1.3.0 +multidict==6.0.4 +multiprocess==0.70.15 +nest-asyncio==1.5.8 +networkx==3.2.1 +nltk==3.8.1 +numpy==1.26.2 + +omegaconf==2.3.0 +opencv-python==4.7.0.72 +orjson==3.9.10 +packaging==23.2 +pandas==2.1.4 +parso==0.8.3 +peft==0.2.0 +pexpect==4.9.0 +pillow==10.1.0 +platformdirs==4.1.0 +portalocker==2.8.2 +progressbar2==4.3.0 +prompt-toolkit==3.0.43 +protobuf==4.25.1 +psutil==5.9.4 +ptyprocess==0.7.0 +pure-eval==0.2.2 +pyarrow==14.0.2 +pyarrow-hotfix==0.6 +pydantic==2.5.2 +pydantic-core==2.14.5 +pydub==0.25.1 +pygments==2.17.2 +pyparsing==3.1.1 +python-dateutil==2.8.2 +python-multipart==0.0.6 +python-utils==3.8.1 +pytz==2023.3.post1 +pyyaml==6.0 +pyzmq==25.1.2 +referencing==0.32.0 +regex==2022.10.31 +requests==2.31.0 +rpds-py==0.15.2 +safetensors==0.4.1 +scikit-image==0.22.0 +scikit-learn==1.3.2 +scipy==1.11.4 +semantic-version==2.10.0 +sentence-transformers==2.2.2 +sentencepiece==0.1.99 +sentry-sdk==1.39.1 +setproctitle==1.3.3 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.0 +stack-data==0.6.3 +starlette==0.27.0 +sympy==1.12 +threadpoolctl==3.2.0 +tifffile==2023.12.9 +timm==0.6.13 +tokenizers==0.15.0 +toolz==0.12.0 +torch==2.0.0 +torchaudio==2.0.1 +torchvision==0.15.1 +tornado==6.4 +tqdm==4.64.1 +traitlets==5.14.0 +transformers==4.36.2 +typing-extensions==4.9.0 +tzdata==2023.3 +urllib3==2.1.0 +uvicorn==0.24.0.post1 +visual-genome==1.1.1 +wandb==0.16.1 +wcwidth==0.2.12 +webdataset==0.2.48 +websockets==11.0.3 +xxhash==3.4.1 +yarl==1.9.4 +zipp==3.17.0 \ No newline at end of file diff --git a/train_configs/tinygptv_stage1.yaml b/train_configs/tinygptv_stage1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..382ea08c8201ebc93d490d5f4348df5add2176da --- /dev/null +++ b/train_configs/tinygptv_stage1.yaml @@ -0,0 +1,57 @@ +model: + arch: minigpt4 + model_type: pretrain_vicuna0 + +datasets: + laion: + batch_size: 64 + vis_processor: + train: + name: "blip2_image_train" + image_size: 224 + text_processor: + train: + name: "blip_caption" + sample_ratio: 115 + cc_sbu: + batch_size: 64 + vis_processor: + train: + name: "blip2_image_train" + image_size: 224 + text_processor: + train: + name: "blip_caption" + sample_ratio: 14 + + +run: + task: image_text_pretrain + # optimizer + lr_sched: "linear_warmup_cosine_lr" + init_lr: 1e-4 + min_lr: 8e-5 + warmup_lr: 1e-6 + + weight_decay: 0.05 + max_epoch: 1 + num_workers: 4 + warmup_steps: 1000 + iters_per_epoch: 5000 + + seed: 42 + output_dir: "output/minigpt4_stage1_pretrain" + + amp: True + resume_ckpt_path: null + + evaluate: False + train_splits: ["train"] + + device: "cuda" + world_size: 1 + dist_url: "env://" + distributed: True + + wandb_log: True + job_name: minigpt4_pretrain diff --git a/train_configs/tinygptv_stage2.yaml b/train_configs/tinygptv_stage2.yaml new file mode 100644 index 
diff --git a/train_configs/tinygptv_stage2.yaml b/train_configs/tinygptv_stage2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34787ca5628eaceb98991ed3b4346ddc6804b6d8
--- /dev/null
+++ b/train_configs/tinygptv_stage2.yaml
@@ -0,0 +1,61 @@
+model:
+  arch: minigpt4
+  model_type: pretrain_vicuna0
+  ckpt: ''
+  use_grad_checkpoint: True
+  lora_r: 64
+  lora_alpha: 16
+
+datasets:
+  laion:
+    batch_size: 64
+    vis_processor:
+      train:
+        name: "blip2_image_train"
+        image_size: 224
+    text_processor:
+      train:
+        name: "blip_caption"
+    sample_ratio: 115
+  cc_sbu:
+    batch_size: 64
+    vis_processor:
+      train:
+        name: "blip2_image_train"
+        image_size: 224
+    text_processor:
+      train:
+        name: "blip_caption"
+    sample_ratio: 14
+
+
+run:
+  task: image_text_pretrain
+  # optimizer
+  lr_sched: "linear_warmup_cosine_lr"
+  init_lr: 1e-4
+  min_lr: 8e-5
+  warmup_lr: 1e-6
+
+  weight_decay: 0.05
+  max_epoch: 4
+  num_workers: 4
+  warmup_steps: 5000
+  iters_per_epoch: 5000
+
+  seed: 42
+  output_dir: "output/minigpt4_stage1_pretrain"
+
+  amp: True
+  resume_ckpt_path: null
+
+  evaluate: False
+  train_splits: ["train"]
+
+  device: "cuda"
+  world_size: 1
+  dist_url: "env://"
+  distributed: True
+
+  wandb_log: True
+  job_name: minigpt4_pretrain
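Stage 2 differs from stage 1 mainly by resuming from a checkpoint (ckpt), enabling gradient checkpointing, and adding LoRA adapters with lora_r: 64 and lora_alpha: 16, i.e. rank-64 updates scaled by alpha / r = 0.25. The repo applies LoRA through the pinned peft package; the module below is an illustrative re-implementation of the idea, not the project's code.

import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    # Frozen base linear plus a trainable low-rank update B @ A.
    def __init__(self, base: nn.Linear, r: int = 64, alpha: int = 16):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False            # pretrained weight stays frozen
        self.A = nn.Parameter(torch.randn(r, base.in_features) * 0.01)
        self.B = nn.Parameter(torch.zeros(base.out_features, r))  # zero init
        self.scaling = alpha / r               # 16 / 64 = 0.25 with this config

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + (x @ self.A.T @ self.B.T) * self.scaling

layer = LoRALinear(nn.Linear(512, 512))
print(layer(torch.randn(2, 512)).shape)        # torch.Size([2, 512])

Only A and B receive gradients, which is what makes this stage cheap relative to full fine-tuning.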
diff --git a/train_configs/tinygptv_stage3.yaml b/train_configs/tinygptv_stage3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02ba7fcf20e21f5d0869433da91f17dcf73f4114
--- /dev/null
+++ b/train_configs/tinygptv_stage3.yaml
@@ -0,0 +1,52 @@
+model:
+  arch: minigpt4
+  model_type: pretrain_vicuna0
+
+  max_txt_len: 160
+  end_sym: "###"
+  prompt_path: "prompts/alignment.txt"
+  prompt_template: '###Human: {} ###Assistant: '
+  ckpt: ''
+
+
+datasets:
+  cc_sbu_align:
+    batch_size: 6
+    vis_processor:
+      train:
+        name: "blip2_image_train"
+        image_size: 224
+    text_processor:
+      train:
+        name: "blip_caption"
+
+run:
+  task: image_text_pretrain
+  # optimizer
+  lr_sched: "linear_warmup_cosine_lr"
+  init_lr: 3e-5
+  min_lr: 1e-5
+  warmup_lr: 1e-6
+
+  weight_decay: 0.05
+  max_epoch: 5
+  iters_per_epoch: 200
+  num_workers: 4
+  warmup_steps: 200
+
+  seed: 42
+  output_dir: "/root/autodl-tmp/output/minigpt4_stage2_finetune"
+
+  amp: True
+  resume_ckpt_path: null
+
+  evaluate: False
+  train_splits: ["train"]
+
+  device: "cuda"
+  world_size: 1
+  dist_url: "env://"
+  distributed: True
+
+  wandb_log: True
+  job_name: minigpt4_finetune
\ No newline at end of file
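Stage 3 is the instruction-alignment stage: each sample is a prompt drawn from prompts/alignment.txt, wrapped in prompt_template, with the answer terminated by end_sym and clipped to max_txt_len tokens. A small sketch of that assembly follows; the instruction/target pair is invented for illustration, and the real concatenation happens inside the model class.

# Values from train_configs/tinygptv_stage3.yaml above.
prompt_template = "###Human: {} ###Assistant: "
end_sym = "###"

# "<Img><ImageHere></Img>" is the image-slot convention used by the
# MiniGPT-4-style prompt files; the sample text below is made up.
instruction = "<Img><ImageHere></Img> Describe this image in detail."
target = "A dog is sleeping on a red sofa."

sample = prompt_template.format(instruction) + target + end_sym
print(sample)
# ###Human: <Img><ImageHere></Img> Describe this image in detail. ###Assistant: A dog is sleeping on a red sofa.###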
"/root/autodl-tmp/output" + + amp: True + resume_ckpt_path: null + + evaluate: False + train_splits: ["train"] + + device: "cuda" + world_size: 1 + dist_url: "env://" + distributed: True + + wandb_log: True + job_name: minigptv2_finetune \ No newline at end of file